[llvm] [RISCV] Support `llvm.masked.compressstore` intrinsic (PR #83457)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 29 10:42:54 PST 2024


================
@@ -0,0 +1,17545 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs -mtriple=riscv64 -mattr=+v,+d %s -o - | FileCheck %s --check-prefix=RV64
+; RUN: llc -verify-machineinstrs -mtriple=riscv32 -mattr=+v,+d %s -o - | FileCheck %s --check-prefix=RV32
+
+; Compress + store for i8 type
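+; For a legal fixed-length type, the intrinsic lowers to a vcompress.vm that
+; packs the selected elements to the front of the destination register group, a
+; vcpop.m that counts the active mask bits, and a vsetvli + unit-stride vse
+; store of that many elements, as the CHECK lines below show.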
+
+define void @test_compresstore_i8_v1(ptr %p, <1 x i1> %mask, <1 x i8> %data) {
+; RV64-LABEL: test_compresstore_i8_v1:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV64-NEXT:    vcompress.vm v9, v8, v0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_compresstore_i8_v1:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV32-NEXT:    vcompress.vm v9, v8, v0
+; RV32-NEXT:    vcpop.m a1, v0
+; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    ret
+entry:
+  tail call void @llvm.masked.compressstore.v1i8(<1 x i8> %data, ptr %p, <1 x i1> %mask)
+  ret void
+}
+
+define void @test_compresstore_i8_v2(ptr %p, <2 x i1> %mask, <2 x i8> %data) {
+; RV64-LABEL: test_compresstore_i8_v2:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; RV64-NEXT:    vcompress.vm v9, v8, v0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_compresstore_i8_v2:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; RV32-NEXT:    vcompress.vm v9, v8, v0
+; RV32-NEXT:    vcpop.m a1, v0
+; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    ret
+entry:
+  tail call void @llvm.masked.compressstore.v2i8(<2 x i8> %data, ptr %p, <2 x i1> %mask)
+  ret void
+}
+
+define void @test_compresstore_i8_v4(ptr %p, <4 x i1> %mask, <4 x i8> %data) {
+; RV64-LABEL: test_compresstore_i8_v4:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT:    vcompress.vm v9, v8, v0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_compresstore_i8_v4:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT:    vcompress.vm v9, v8, v0
+; RV32-NEXT:    vcpop.m a1, v0
+; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    ret
+entry:
+  tail call void @llvm.masked.compressstore.v4i8(<4 x i8> %data, ptr %p, <4 x i1> %mask)
+  ret void
+}
+
+define void @test_compresstore_i8_v8(ptr %p, <8 x i1> %mask, <8 x i8> %data) {
+; RV64-LABEL: test_compresstore_i8_v8:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64-NEXT:    vcompress.vm v9, v8, v0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_compresstore_i8_v8:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT:    vcompress.vm v9, v8, v0
+; RV32-NEXT:    vcpop.m a1, v0
+; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    ret
+entry:
+  tail call void @llvm.masked.compressstore.v8i8(<8 x i8> %data, ptr %p, <8 x i1> %mask)
+  ret void
+}
+
+define void @test_compresstore_i8_v16(ptr %p, <16 x i1> %mask, <16 x i8> %data) {
+; RV64-LABEL: test_compresstore_i8_v16:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT:    vcompress.vm v9, v8, v0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_compresstore_i8_v16:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT:    vcompress.vm v9, v8, v0
+; RV32-NEXT:    vcpop.m a1, v0
+; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    ret
+entry:
+  tail call void @llvm.masked.compressstore.v16i8(<16 x i8> %data, ptr %p, <16 x i1> %mask)
+  ret void
+}
+
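+; The wider cases below use the same three-instruction sequence; only the
+; vsetvli element count and the LMUL (m2, m4, m8) grow with the vector length.
+;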
+define void @test_compresstore_i8_v32(ptr %p, <32 x i1> %mask, <32 x i8> %data) {
+; RV64-LABEL: test_compresstore_i8_v32:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    li a1, 32
+; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT:    vcompress.vm v10, v8, v0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_compresstore_i8_v32:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT:    vcompress.vm v10, v8, v0
+; RV32-NEXT:    vcpop.m a1, v0
+; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    ret
+entry:
+  tail call void @llvm.masked.compressstore.v32i8(<32 x i8> %data, ptr %p, <32 x i1> %mask)
+  ret void
+}
+
+define void @test_compresstore_i8_v64(ptr %p, <64 x i1> %mask, <64 x i8> %data) {
+; RV64-LABEL: test_compresstore_i8_v64:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    li a1, 64
+; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT:    vcompress.vm v12, v8, v0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT:    vse8.v v12, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_compresstore_i8_v64:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    li a1, 64
+; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT:    vcompress.vm v12, v8, v0
+; RV32-NEXT:    vcpop.m a1, v0
+; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT:    vse8.v v12, (a0)
+; RV32-NEXT:    ret
+entry:
+  tail call void @llvm.masked.compressstore.v64i8(<64 x i8> %data, ptr %p, <64 x i1> %mask)
+  ret void
+}
+
+define void @test_compresstore_i8_v128(ptr %p, <128 x i1> %mask, <128 x i8> %data) {
+; RV64-LABEL: test_compresstore_i8_v128:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vcompress.vm v16, v8, v0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a0)
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_compresstore_i8_v128:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    li a1, 128
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT:    vcompress.vm v16, v8, v0
+; RV32-NEXT:    vcpop.m a1, v0
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a0)
+; RV32-NEXT:    ret
+entry:
+  tail call void @llvm.masked.compressstore.v128i8(<128 x i8> %data, ptr %p, <128 x i1> %mask)
+  ret void
+}
+
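+; The <256 x i8> case does not use the vcompress lowering (at the minimum VLEN
+; implied by +v it would need more than an LMUL=8 register group), so the CHECK
+; lines below show the generic scalarized expansion with one conditional store
+; per mask bit instead.
+;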
+define void @test_compresstore_i8_v256(ptr %p, <256 x i1> %mask, <256 x i8> %data) {
+; RV64-LABEL: test_compresstore_i8_v256:
+; RV64:       # %bb.0: # %entry
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vle8.v v24, (a1)
+; RV64-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a2, v0
+; RV64-NEXT:    andi a1, a2, 1
+; RV64-NEXT:    bnez a1, .LBB8_273
+; RV64-NEXT:  # %bb.1: # %else
+; RV64-NEXT:    andi a1, a2, 2
+; RV64-NEXT:    bnez a1, .LBB8_274
+; RV64-NEXT:  .LBB8_2: # %else2
+; RV64-NEXT:    andi a1, a2, 4
+; RV64-NEXT:    bnez a1, .LBB8_275
+; RV64-NEXT:  .LBB8_3: # %else5
+; RV64-NEXT:    andi a1, a2, 8
+; RV64-NEXT:    bnez a1, .LBB8_276
+; RV64-NEXT:  .LBB8_4: # %else8
+; RV64-NEXT:    andi a1, a2, 16
+; RV64-NEXT:    bnez a1, .LBB8_277
+; RV64-NEXT:  .LBB8_5: # %else11
+; RV64-NEXT:    andi a1, a2, 32
+; RV64-NEXT:    bnez a1, .LBB8_278
+; RV64-NEXT:  .LBB8_6: # %else14
+; RV64-NEXT:    andi a1, a2, 64
+; RV64-NEXT:    bnez a1, .LBB8_279
+; RV64-NEXT:  .LBB8_7: # %else17
+; RV64-NEXT:    andi a1, a2, 128
+; RV64-NEXT:    bnez a1, .LBB8_280
+; RV64-NEXT:  .LBB8_8: # %else20
+; RV64-NEXT:    andi a1, a2, 256
+; RV64-NEXT:    bnez a1, .LBB8_281
+; RV64-NEXT:  .LBB8_9: # %else23
+; RV64-NEXT:    andi a1, a2, 512
+; RV64-NEXT:    bnez a1, .LBB8_282
+; RV64-NEXT:  .LBB8_10: # %else26
+; RV64-NEXT:    andi a1, a2, 1024
+; RV64-NEXT:    bnez a1, .LBB8_283
+; RV64-NEXT:  .LBB8_11: # %else29
+; RV64-NEXT:    slli a1, a2, 52
+; RV64-NEXT:    bltz a1, .LBB8_284
+; RV64-NEXT:  .LBB8_12: # %else32
+; RV64-NEXT:    slli a1, a2, 51
+; RV64-NEXT:    bltz a1, .LBB8_285
+; RV64-NEXT:  .LBB8_13: # %else35
+; RV64-NEXT:    slli a1, a2, 50
+; RV64-NEXT:    bltz a1, .LBB8_286
+; RV64-NEXT:  .LBB8_14: # %else38
+; RV64-NEXT:    slli a1, a2, 49
+; RV64-NEXT:    bltz a1, .LBB8_287
+; RV64-NEXT:  .LBB8_15: # %else41
+; RV64-NEXT:    slli a1, a2, 48
+; RV64-NEXT:    bltz a1, .LBB8_288
+; RV64-NEXT:  .LBB8_16: # %else44
+; RV64-NEXT:    slli a1, a2, 47
+; RV64-NEXT:    bltz a1, .LBB8_289
+; RV64-NEXT:  .LBB8_17: # %else47
+; RV64-NEXT:    slli a1, a2, 46
+; RV64-NEXT:    bltz a1, .LBB8_290
+; RV64-NEXT:  .LBB8_18: # %else50
+; RV64-NEXT:    slli a1, a2, 45
+; RV64-NEXT:    bltz a1, .LBB8_291
+; RV64-NEXT:  .LBB8_19: # %else53
+; RV64-NEXT:    slli a1, a2, 44
+; RV64-NEXT:    bltz a1, .LBB8_292
+; RV64-NEXT:  .LBB8_20: # %else56
+; RV64-NEXT:    slli a1, a2, 43
+; RV64-NEXT:    bltz a1, .LBB8_293
+; RV64-NEXT:  .LBB8_21: # %else59
+; RV64-NEXT:    slli a1, a2, 42
+; RV64-NEXT:    bltz a1, .LBB8_294
+; RV64-NEXT:  .LBB8_22: # %else62
+; RV64-NEXT:    slli a1, a2, 41
+; RV64-NEXT:    bgez a1, .LBB8_23
+; RV64-NEXT:    j .LBB8_295
+; RV64-NEXT:  .LBB8_23: # %else65
+; RV64-NEXT:    slli a1, a2, 40
+; RV64-NEXT:    bgez a1, .LBB8_24
+; RV64-NEXT:    j .LBB8_296
+; RV64-NEXT:  .LBB8_24: # %else68
+; RV64-NEXT:    slli a1, a2, 39
+; RV64-NEXT:    bgez a1, .LBB8_25
+; RV64-NEXT:    j .LBB8_297
+; RV64-NEXT:  .LBB8_25: # %else71
+; RV64-NEXT:    slli a1, a2, 38
+; RV64-NEXT:    bgez a1, .LBB8_26
+; RV64-NEXT:    j .LBB8_298
+; RV64-NEXT:  .LBB8_26: # %else74
+; RV64-NEXT:    slli a1, a2, 37
+; RV64-NEXT:    bgez a1, .LBB8_27
+; RV64-NEXT:    j .LBB8_299
+; RV64-NEXT:  .LBB8_27: # %else77
+; RV64-NEXT:    slli a1, a2, 36
+; RV64-NEXT:    bgez a1, .LBB8_28
+; RV64-NEXT:    j .LBB8_300
+; RV64-NEXT:  .LBB8_28: # %else80
+; RV64-NEXT:    slli a1, a2, 35
+; RV64-NEXT:    bgez a1, .LBB8_29
+; RV64-NEXT:    j .LBB8_301
+; RV64-NEXT:  .LBB8_29: # %else83
+; RV64-NEXT:    slli a1, a2, 34
+; RV64-NEXT:    bgez a1, .LBB8_30
+; RV64-NEXT:    j .LBB8_302
+; RV64-NEXT:  .LBB8_30: # %else86
+; RV64-NEXT:    slli a1, a2, 33
+; RV64-NEXT:    bgez a1, .LBB8_31
+; RV64-NEXT:    j .LBB8_303
+; RV64-NEXT:  .LBB8_31: # %else89
+; RV64-NEXT:    slli a1, a2, 32
+; RV64-NEXT:    bgez a1, .LBB8_33
+; RV64-NEXT:  .LBB8_32: # %cond.store91
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 31
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:  .LBB8_33: # %else92
+; RV64-NEXT:    addi sp, sp, -2032
+; RV64-NEXT:    .cfi_def_cfa_offset 2032
+; RV64-NEXT:    sd ra, 2024(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 2016(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    addi s0, sp, 2032
+; RV64-NEXT:    .cfi_def_cfa s0, 0
+; RV64-NEXT:    lui a1, 6
+; RV64-NEXT:    addiw a1, a1, -1776
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    andi sp, sp, -128
+; RV64-NEXT:    slli a3, a2, 31
+; RV64-NEXT:    lui a1, 6
+; RV64-NEXT:    addiw a1, a1, -984
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    bgez a3, .LBB8_34
+; RV64-NEXT:    j .LBB8_304
+; RV64-NEXT:  .LBB8_34: # %else95
+; RV64-NEXT:    slli a3, a2, 30
+; RV64-NEXT:    bgez a3, .LBB8_35
+; RV64-NEXT:    j .LBB8_305
+; RV64-NEXT:  .LBB8_35: # %else98
+; RV64-NEXT:    slli a3, a2, 29
+; RV64-NEXT:    bgez a3, .LBB8_36
+; RV64-NEXT:    j .LBB8_306
+; RV64-NEXT:  .LBB8_36: # %else101
+; RV64-NEXT:    slli a3, a2, 28
+; RV64-NEXT:    bgez a3, .LBB8_37
+; RV64-NEXT:    j .LBB8_307
+; RV64-NEXT:  .LBB8_37: # %else104
+; RV64-NEXT:    slli a3, a2, 27
+; RV64-NEXT:    bgez a3, .LBB8_38
+; RV64-NEXT:    j .LBB8_308
+; RV64-NEXT:  .LBB8_38: # %else107
+; RV64-NEXT:    slli a3, a2, 26
+; RV64-NEXT:    bgez a3, .LBB8_39
+; RV64-NEXT:    j .LBB8_309
+; RV64-NEXT:  .LBB8_39: # %else110
+; RV64-NEXT:    slli a3, a2, 25
+; RV64-NEXT:    bgez a3, .LBB8_40
+; RV64-NEXT:    j .LBB8_310
+; RV64-NEXT:  .LBB8_40: # %else113
+; RV64-NEXT:    slli a3, a2, 24
+; RV64-NEXT:    bgez a3, .LBB8_41
+; RV64-NEXT:    j .LBB8_311
+; RV64-NEXT:  .LBB8_41: # %else116
+; RV64-NEXT:    slli a3, a2, 23
+; RV64-NEXT:    bgez a3, .LBB8_43
+; RV64-NEXT:  .LBB8_42: # %cond.store118
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 23
+; RV64-NEXT:    slli a4, a4, 10
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a1, 0(a1)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:  .LBB8_43: # %else119
+; RV64-NEXT:    slli a3, a2, 22
+; RV64-NEXT:    lui a1, 5
+; RV64-NEXT:    addiw a1, a1, 953
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    bgez a3, .LBB8_44
+; RV64-NEXT:    j .LBB8_312
+; RV64-NEXT:  .LBB8_44: # %else122
+; RV64-NEXT:    slli a3, a2, 21
+; RV64-NEXT:    bgez a3, .LBB8_45
+; RV64-NEXT:    j .LBB8_313
+; RV64-NEXT:  .LBB8_45: # %else125
+; RV64-NEXT:    slli a3, a2, 20
+; RV64-NEXT:    bgez a3, .LBB8_46
+; RV64-NEXT:    j .LBB8_314
+; RV64-NEXT:  .LBB8_46: # %else128
+; RV64-NEXT:    slli a3, a2, 19
+; RV64-NEXT:    bgez a3, .LBB8_47
+; RV64-NEXT:    j .LBB8_315
+; RV64-NEXT:  .LBB8_47: # %else131
+; RV64-NEXT:    slli a3, a2, 18
+; RV64-NEXT:    bgez a3, .LBB8_48
+; RV64-NEXT:    j .LBB8_316
+; RV64-NEXT:  .LBB8_48: # %else134
+; RV64-NEXT:    slli a3, a2, 17
+; RV64-NEXT:    bgez a3, .LBB8_49
+; RV64-NEXT:    j .LBB8_317
+; RV64-NEXT:  .LBB8_49: # %else137
+; RV64-NEXT:    slli a3, a2, 16
+; RV64-NEXT:    bgez a3, .LBB8_50
+; RV64-NEXT:    j .LBB8_318
+; RV64-NEXT:  .LBB8_50: # %else140
+; RV64-NEXT:    slli a3, a2, 15
+; RV64-NEXT:    bgez a3, .LBB8_51
+; RV64-NEXT:    j .LBB8_319
+; RV64-NEXT:  .LBB8_51: # %else143
+; RV64-NEXT:    slli a3, a2, 14
+; RV64-NEXT:    bgez a3, .LBB8_52
+; RV64-NEXT:    j .LBB8_320
+; RV64-NEXT:  .LBB8_52: # %else146
+; RV64-NEXT:    slli a3, a2, 13
+; RV64-NEXT:    bgez a3, .LBB8_53
+; RV64-NEXT:    j .LBB8_321
+; RV64-NEXT:  .LBB8_53: # %else149
+; RV64-NEXT:    slli a3, a2, 12
+; RV64-NEXT:    bgez a3, .LBB8_54
+; RV64-NEXT:    j .LBB8_322
+; RV64-NEXT:  .LBB8_54: # %else152
+; RV64-NEXT:    slli a3, a2, 11
+; RV64-NEXT:    bgez a3, .LBB8_55
+; RV64-NEXT:    j .LBB8_323
+; RV64-NEXT:  .LBB8_55: # %else155
+; RV64-NEXT:    slli a3, a2, 10
+; RV64-NEXT:    bgez a3, .LBB8_56
+; RV64-NEXT:    j .LBB8_324
+; RV64-NEXT:  .LBB8_56: # %else158
+; RV64-NEXT:    slli a3, a2, 9
+; RV64-NEXT:    bgez a3, .LBB8_57
+; RV64-NEXT:    j .LBB8_325
+; RV64-NEXT:  .LBB8_57: # %else161
+; RV64-NEXT:    slli a3, a2, 8
+; RV64-NEXT:    bgez a3, .LBB8_58
+; RV64-NEXT:    j .LBB8_326
+; RV64-NEXT:  .LBB8_58: # %else164
+; RV64-NEXT:    slli a3, a2, 7
+; RV64-NEXT:    bgez a3, .LBB8_59
+; RV64-NEXT:    j .LBB8_327
+; RV64-NEXT:  .LBB8_59: # %else167
+; RV64-NEXT:    slli a3, a2, 6
+; RV64-NEXT:    bgez a3, .LBB8_61
+; RV64-NEXT:  .LBB8_60: # %cond.store169
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 896
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a1, 0(a1)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:  .LBB8_61: # %else170
+; RV64-NEXT:    slli a1, a2, 5
+; RV64-NEXT:    lui a3, 5
+; RV64-NEXT:    addiw a3, a3, -1206
+; RV64-NEXT:    add a3, sp, a3
+; RV64-NEXT:    bgez a1, .LBB8_62
+; RV64-NEXT:    j .LBB8_328
+; RV64-NEXT:  .LBB8_62: # %else173
+; RV64-NEXT:    slli a1, a2, 4
+; RV64-NEXT:    bgez a1, .LBB8_63
+; RV64-NEXT:    j .LBB8_329
+; RV64-NEXT:  .LBB8_63: # %else176
+; RV64-NEXT:    slli a1, a2, 3
+; RV64-NEXT:    bgez a1, .LBB8_64
+; RV64-NEXT:    j .LBB8_330
+; RV64-NEXT:  .LBB8_64: # %else179
+; RV64-NEXT:    slli a1, a2, 2
+; RV64-NEXT:    bgez a1, .LBB8_66
+; RV64-NEXT:  .LBB8_65: # %cond.store181
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 384
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a1, 1651(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:  .LBB8_66: # %else182
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    slli a1, a2, 1
+; RV64-NEXT:    vslidedown.vi v9, v0, 1
+; RV64-NEXT:    bgez a1, .LBB8_68
+; RV64-NEXT:  # %bb.67: # %cond.store184
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 256
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a1, 1524(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:  .LBB8_68: # %else185
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v9
+; RV64-NEXT:    bgez a2, .LBB8_69
+; RV64-NEXT:    j .LBB8_331
+; RV64-NEXT:  .LBB8_69: # %else188
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    beqz a2, .LBB8_70
+; RV64-NEXT:    j .LBB8_332
+; RV64-NEXT:  .LBB8_70: # %else191
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB8_71
+; RV64-NEXT:    j .LBB8_333
+; RV64-NEXT:  .LBB8_71: # %else194
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB8_72
+; RV64-NEXT:    j .LBB8_334
+; RV64-NEXT:  .LBB8_72: # %else197
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB8_73
+; RV64-NEXT:    j .LBB8_335
+; RV64-NEXT:  .LBB8_73: # %else200
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB8_74
+; RV64-NEXT:    j .LBB8_336
+; RV64-NEXT:  .LBB8_74: # %else203
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB8_75
+; RV64-NEXT:    j .LBB8_337
+; RV64-NEXT:  .LBB8_75: # %else206
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB8_76
+; RV64-NEXT:    j .LBB8_338
+; RV64-NEXT:  .LBB8_76: # %else209
+; RV64-NEXT:    andi a2, a1, 128
+; RV64-NEXT:    beqz a2, .LBB8_77
+; RV64-NEXT:    j .LBB8_339
+; RV64-NEXT:  .LBB8_77: # %else212
+; RV64-NEXT:    andi a2, a1, 256
+; RV64-NEXT:    beqz a2, .LBB8_78
+; RV64-NEXT:    j .LBB8_340
+; RV64-NEXT:  .LBB8_78: # %else215
+; RV64-NEXT:    andi a2, a1, 512
+; RV64-NEXT:    beqz a2, .LBB8_79
+; RV64-NEXT:    j .LBB8_341
+; RV64-NEXT:  .LBB8_79: # %else218
+; RV64-NEXT:    andi a2, a1, 1024
+; RV64-NEXT:    beqz a2, .LBB8_81
+; RV64-NEXT:  .LBB8_80: # %cond.store220
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -1280
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 0(a3)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:  .LBB8_81: # %else221
+; RV64-NEXT:    slli a3, a1, 52
+; RV64-NEXT:    lui a2, 4
+; RV64-NEXT:    addiw a2, a2, 731
+; RV64-NEXT:    add a2, sp, a2
+; RV64-NEXT:    bgez a3, .LBB8_82
+; RV64-NEXT:    j .LBB8_342
+; RV64-NEXT:  .LBB8_82: # %else224
+; RV64-NEXT:    slli a3, a1, 51
+; RV64-NEXT:    bgez a3, .LBB8_83
+; RV64-NEXT:    j .LBB8_343
+; RV64-NEXT:  .LBB8_83: # %else227
+; RV64-NEXT:    slli a3, a1, 50
+; RV64-NEXT:    bgez a3, .LBB8_84
+; RV64-NEXT:    j .LBB8_344
+; RV64-NEXT:  .LBB8_84: # %else230
+; RV64-NEXT:    slli a3, a1, 49
+; RV64-NEXT:    bgez a3, .LBB8_85
+; RV64-NEXT:    j .LBB8_345
+; RV64-NEXT:  .LBB8_85: # %else233
+; RV64-NEXT:    slli a3, a1, 48
+; RV64-NEXT:    bgez a3, .LBB8_86
+; RV64-NEXT:    j .LBB8_346
+; RV64-NEXT:  .LBB8_86: # %else236
+; RV64-NEXT:    slli a3, a1, 47
+; RV64-NEXT:    bgez a3, .LBB8_87
+; RV64-NEXT:    j .LBB8_347
+; RV64-NEXT:  .LBB8_87: # %else239
+; RV64-NEXT:    slli a3, a1, 46
+; RV64-NEXT:    bgez a3, .LBB8_88
+; RV64-NEXT:    j .LBB8_348
+; RV64-NEXT:  .LBB8_88: # %else242
+; RV64-NEXT:    slli a3, a1, 45
+; RV64-NEXT:    bgez a3, .LBB8_89
+; RV64-NEXT:    j .LBB8_349
+; RV64-NEXT:  .LBB8_89: # %else245
+; RV64-NEXT:    slli a3, a1, 44
+; RV64-NEXT:    bgez a3, .LBB8_90
+; RV64-NEXT:    j .LBB8_350
+; RV64-NEXT:  .LBB8_90: # %else248
+; RV64-NEXT:    slli a3, a1, 43
+; RV64-NEXT:    bgez a3, .LBB8_91
+; RV64-NEXT:    j .LBB8_351
+; RV64-NEXT:  .LBB8_91: # %else251
+; RV64-NEXT:    slli a3, a1, 42
+; RV64-NEXT:    bgez a3, .LBB8_92
+; RV64-NEXT:    j .LBB8_352
+; RV64-NEXT:  .LBB8_92: # %else254
+; RV64-NEXT:    slli a3, a1, 41
+; RV64-NEXT:    bgez a3, .LBB8_93
+; RV64-NEXT:    j .LBB8_353
+; RV64-NEXT:  .LBB8_93: # %else257
+; RV64-NEXT:    slli a3, a1, 40
+; RV64-NEXT:    bgez a3, .LBB8_94
+; RV64-NEXT:    j .LBB8_354
+; RV64-NEXT:  .LBB8_94: # %else260
+; RV64-NEXT:    slli a3, a1, 39
+; RV64-NEXT:    bgez a3, .LBB8_95
+; RV64-NEXT:    j .LBB8_355
+; RV64-NEXT:  .LBB8_95: # %else263
+; RV64-NEXT:    slli a3, a1, 38
+; RV64-NEXT:    bgez a3, .LBB8_96
+; RV64-NEXT:    j .LBB8_356
+; RV64-NEXT:  .LBB8_96: # %else266
+; RV64-NEXT:    slli a3, a1, 37
+; RV64-NEXT:    bgez a3, .LBB8_97
+; RV64-NEXT:    j .LBB8_357
+; RV64-NEXT:  .LBB8_97: # %else269
+; RV64-NEXT:    slli a3, a1, 36
+; RV64-NEXT:    bgez a3, .LBB8_99
+; RV64-NEXT:  .LBB8_98: # %cond.store271
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 640
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 0(a2)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:  .LBB8_99: # %else272
+; RV64-NEXT:    slli a3, a1, 35
+; RV64-NEXT:    lui a2, 4
+; RV64-NEXT:    addiw a2, a2, -1428
+; RV64-NEXT:    add a2, sp, a2
+; RV64-NEXT:    bgez a3, .LBB8_100
+; RV64-NEXT:    j .LBB8_358
+; RV64-NEXT:  .LBB8_100: # %else275
+; RV64-NEXT:    slli a3, a1, 34
+; RV64-NEXT:    bgez a3, .LBB8_101
+; RV64-NEXT:    j .LBB8_359
+; RV64-NEXT:  .LBB8_101: # %else278
+; RV64-NEXT:    slli a3, a1, 33
+; RV64-NEXT:    bgez a3, .LBB8_102
+; RV64-NEXT:    j .LBB8_360
+; RV64-NEXT:  .LBB8_102: # %else281
+; RV64-NEXT:    slli a3, a1, 32
+; RV64-NEXT:    bgez a3, .LBB8_103
+; RV64-NEXT:    j .LBB8_361
+; RV64-NEXT:  .LBB8_103: # %else284
+; RV64-NEXT:    slli a3, a1, 31
+; RV64-NEXT:    bgez a3, .LBB8_104
+; RV64-NEXT:    j .LBB8_362
+; RV64-NEXT:  .LBB8_104: # %else287
+; RV64-NEXT:    slli a3, a1, 30
+; RV64-NEXT:    bgez a3, .LBB8_105
+; RV64-NEXT:    j .LBB8_363
+; RV64-NEXT:  .LBB8_105: # %else290
+; RV64-NEXT:    slli a3, a1, 29
+; RV64-NEXT:    bgez a3, .LBB8_106
+; RV64-NEXT:    j .LBB8_364
+; RV64-NEXT:  .LBB8_106: # %else293
+; RV64-NEXT:    slli a3, a1, 28
+; RV64-NEXT:    bgez a3, .LBB8_107
+; RV64-NEXT:    j .LBB8_365
+; RV64-NEXT:  .LBB8_107: # %else296
+; RV64-NEXT:    slli a3, a1, 27
+; RV64-NEXT:    bgez a3, .LBB8_108
+; RV64-NEXT:    j .LBB8_366
+; RV64-NEXT:  .LBB8_108: # %else299
+; RV64-NEXT:    slli a3, a1, 26
+; RV64-NEXT:    bgez a3, .LBB8_109
+; RV64-NEXT:    j .LBB8_367
+; RV64-NEXT:  .LBB8_109: # %else302
+; RV64-NEXT:    slli a3, a1, 25
+; RV64-NEXT:    bgez a3, .LBB8_110
+; RV64-NEXT:    j .LBB8_368
+; RV64-NEXT:  .LBB8_110: # %else305
+; RV64-NEXT:    slli a3, a1, 24
+; RV64-NEXT:    bgez a3, .LBB8_111
+; RV64-NEXT:    j .LBB8_369
+; RV64-NEXT:  .LBB8_111: # %else308
+; RV64-NEXT:    slli a3, a1, 23
+; RV64-NEXT:    bgez a3, .LBB8_112
+; RV64-NEXT:    j .LBB8_370
+; RV64-NEXT:  .LBB8_112: # %else311
+; RV64-NEXT:    slli a3, a1, 22
+; RV64-NEXT:    bgez a3, .LBB8_113
+; RV64-NEXT:    j .LBB8_371
+; RV64-NEXT:  .LBB8_113: # %else314
+; RV64-NEXT:    slli a3, a1, 21
+; RV64-NEXT:    bgez a3, .LBB8_114
+; RV64-NEXT:    j .LBB8_372
+; RV64-NEXT:  .LBB8_114: # %else317
+; RV64-NEXT:    slli a3, a1, 20
+; RV64-NEXT:    bgez a3, .LBB8_115
+; RV64-NEXT:    j .LBB8_373
+; RV64-NEXT:  .LBB8_115: # %else320
+; RV64-NEXT:    slli a3, a1, 19
+; RV64-NEXT:    bgez a3, .LBB8_117
+; RV64-NEXT:  .LBB8_116: # %cond.store322
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 29
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 0(a2)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:  .LBB8_117: # %else323
+; RV64-NEXT:    slli a3, a1, 18
+; RV64-NEXT:    lui a2, 3
+; RV64-NEXT:    addiw a2, a2, 509
+; RV64-NEXT:    add a2, sp, a2
+; RV64-NEXT:    bgez a3, .LBB8_118
+; RV64-NEXT:    j .LBB8_374
+; RV64-NEXT:  .LBB8_118: # %else326
+; RV64-NEXT:    slli a3, a1, 17
+; RV64-NEXT:    bgez a3, .LBB8_119
+; RV64-NEXT:    j .LBB8_375
+; RV64-NEXT:  .LBB8_119: # %else329
+; RV64-NEXT:    slli a3, a1, 16
+; RV64-NEXT:    bgez a3, .LBB8_120
+; RV64-NEXT:    j .LBB8_376
+; RV64-NEXT:  .LBB8_120: # %else332
+; RV64-NEXT:    slli a3, a1, 15
+; RV64-NEXT:    bgez a3, .LBB8_121
+; RV64-NEXT:    j .LBB8_377
+; RV64-NEXT:  .LBB8_121: # %else335
+; RV64-NEXT:    slli a3, a1, 14
+; RV64-NEXT:    bgez a3, .LBB8_122
+; RV64-NEXT:    j .LBB8_378
+; RV64-NEXT:  .LBB8_122: # %else338
+; RV64-NEXT:    slli a3, a1, 13
+; RV64-NEXT:    bgez a3, .LBB8_123
+; RV64-NEXT:    j .LBB8_379
+; RV64-NEXT:  .LBB8_123: # %else341
+; RV64-NEXT:    slli a3, a1, 12
+; RV64-NEXT:    bgez a3, .LBB8_124
+; RV64-NEXT:    j .LBB8_380
+; RV64-NEXT:  .LBB8_124: # %else344
+; RV64-NEXT:    slli a3, a1, 11
+; RV64-NEXT:    bgez a3, .LBB8_125
+; RV64-NEXT:    j .LBB8_381
+; RV64-NEXT:  .LBB8_125: # %else347
+; RV64-NEXT:    slli a3, a1, 10
+; RV64-NEXT:    bgez a3, .LBB8_126
+; RV64-NEXT:    j .LBB8_382
+; RV64-NEXT:  .LBB8_126: # %else350
+; RV64-NEXT:    slli a3, a1, 9
+; RV64-NEXT:    bgez a3, .LBB8_127
+; RV64-NEXT:    j .LBB8_383
+; RV64-NEXT:  .LBB8_127: # %else353
+; RV64-NEXT:    slli a3, a1, 8
+; RV64-NEXT:    bgez a3, .LBB8_128
+; RV64-NEXT:    j .LBB8_384
+; RV64-NEXT:  .LBB8_128: # %else356
+; RV64-NEXT:    slli a3, a1, 7
+; RV64-NEXT:    bgez a3, .LBB8_129
+; RV64-NEXT:    j .LBB8_385
+; RV64-NEXT:  .LBB8_129: # %else359
+; RV64-NEXT:    slli a3, a1, 6
+; RV64-NEXT:    bgez a3, .LBB8_130
+; RV64-NEXT:    j .LBB8_386
+; RV64-NEXT:  .LBB8_130: # %else362
+; RV64-NEXT:    slli a3, a1, 5
+; RV64-NEXT:    bgez a3, .LBB8_131
+; RV64-NEXT:    j .LBB8_387
+; RV64-NEXT:  .LBB8_131: # %else365
+; RV64-NEXT:    slli a3, a1, 4
+; RV64-NEXT:    bgez a3, .LBB8_132
+; RV64-NEXT:    j .LBB8_388
+; RV64-NEXT:  .LBB8_132: # %else368
+; RV64-NEXT:    slli a3, a1, 3
+; RV64-NEXT:    bgez a3, .LBB8_133
+; RV64-NEXT:    j .LBB8_389
+; RV64-NEXT:  .LBB8_133: # %else371
+; RV64-NEXT:    slli a3, a1, 2
+; RV64-NEXT:    bgez a3, .LBB8_135
+; RV64-NEXT:  .LBB8_134: # %cond.store373
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 384
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 0(a2)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:  .LBB8_135: # %else374
+; RV64-NEXT:    slli a2, a1, 1
+; RV64-NEXT:    lui a3, 3
+; RV64-NEXT:    addiw a3, a3, -1619
+; RV64-NEXT:    add a3, sp, a3
+; RV64-NEXT:    bgez a2, .LBB8_137
+; RV64-NEXT:  # %bb.136: # %cond.store376
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 256
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 2001(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:  .LBB8_137: # %else377
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a2, v8
+; RV64-NEXT:    bgez a1, .LBB8_138
+; RV64-NEXT:    j .LBB8_390
+; RV64-NEXT:  .LBB8_138: # %else380
+; RV64-NEXT:    andi a1, a2, 1
+; RV64-NEXT:    beqz a1, .LBB8_139
+; RV64-NEXT:    j .LBB8_391
+; RV64-NEXT:  .LBB8_139: # %else383
+; RV64-NEXT:    andi a1, a2, 2
+; RV64-NEXT:    beqz a1, .LBB8_140
+; RV64-NEXT:    j .LBB8_392
+; RV64-NEXT:  .LBB8_140: # %else386
+; RV64-NEXT:    andi a1, a2, 4
+; RV64-NEXT:    beqz a1, .LBB8_141
+; RV64-NEXT:    j .LBB8_393
+; RV64-NEXT:  .LBB8_141: # %else389
+; RV64-NEXT:    andi a1, a2, 8
+; RV64-NEXT:    beqz a1, .LBB8_142
+; RV64-NEXT:    j .LBB8_394
+; RV64-NEXT:  .LBB8_142: # %else392
+; RV64-NEXT:    andi a1, a2, 16
+; RV64-NEXT:    beqz a1, .LBB8_143
+; RV64-NEXT:    j .LBB8_395
+; RV64-NEXT:  .LBB8_143: # %else395
+; RV64-NEXT:    andi a1, a2, 32
+; RV64-NEXT:    beqz a1, .LBB8_144
+; RV64-NEXT:    j .LBB8_396
+; RV64-NEXT:  .LBB8_144: # %else398
+; RV64-NEXT:    andi a1, a2, 64
+; RV64-NEXT:    beqz a1, .LBB8_145
+; RV64-NEXT:    j .LBB8_397
+; RV64-NEXT:  .LBB8_145: # %else401
+; RV64-NEXT:    andi a1, a2, 128
+; RV64-NEXT:    beqz a1, .LBB8_146
+; RV64-NEXT:    j .LBB8_398
+; RV64-NEXT:  .LBB8_146: # %else404
+; RV64-NEXT:    andi a1, a2, 256
+; RV64-NEXT:    beqz a1, .LBB8_147
+; RV64-NEXT:    j .LBB8_399
+; RV64-NEXT:  .LBB8_147: # %else407
+; RV64-NEXT:    andi a1, a2, 512
+; RV64-NEXT:    beqz a1, .LBB8_148
+; RV64-NEXT:    j .LBB8_400
+; RV64-NEXT:  .LBB8_148: # %else410
+; RV64-NEXT:    andi a1, a2, 1024
+; RV64-NEXT:    beqz a1, .LBB8_149
+; RV64-NEXT:    j .LBB8_401
+; RV64-NEXT:  .LBB8_149: # %else413
+; RV64-NEXT:    slli a1, a2, 52
+; RV64-NEXT:    bgez a1, .LBB8_150
+; RV64-NEXT:    j .LBB8_402
+; RV64-NEXT:  .LBB8_150: # %else416
+; RV64-NEXT:    slli a1, a2, 51
+; RV64-NEXT:    bgez a1, .LBB8_151
+; RV64-NEXT:    j .LBB8_403
+; RV64-NEXT:  .LBB8_151: # %else419
+; RV64-NEXT:    slli a1, a2, 50
+; RV64-NEXT:    bgez a1, .LBB8_152
+; RV64-NEXT:    j .LBB8_404
+; RV64-NEXT:  .LBB8_152: # %else422
+; RV64-NEXT:    slli a1, a2, 49
+; RV64-NEXT:    bgez a1, .LBB8_153
+; RV64-NEXT:    j .LBB8_405
+; RV64-NEXT:  .LBB8_153: # %else425
+; RV64-NEXT:    slli a1, a2, 48
+; RV64-NEXT:    bgez a1, .LBB8_154
+; RV64-NEXT:    j .LBB8_406
+; RV64-NEXT:  .LBB8_154: # %else428
+; RV64-NEXT:    slli a1, a2, 47
+; RV64-NEXT:    bgez a1, .LBB8_155
+; RV64-NEXT:    j .LBB8_407
+; RV64-NEXT:  .LBB8_155: # %else431
+; RV64-NEXT:    slli a1, a2, 46
+; RV64-NEXT:    bgez a1, .LBB8_156
+; RV64-NEXT:    j .LBB8_408
+; RV64-NEXT:  .LBB8_156: # %else434
+; RV64-NEXT:    slli a1, a2, 45
+; RV64-NEXT:    bgez a1, .LBB8_157
+; RV64-NEXT:    j .LBB8_409
+; RV64-NEXT:  .LBB8_157: # %else437
+; RV64-NEXT:    slli a1, a2, 44
+; RV64-NEXT:    bgez a1, .LBB8_158
+; RV64-NEXT:    j .LBB8_410
+; RV64-NEXT:  .LBB8_158: # %else440
+; RV64-NEXT:    slli a1, a2, 43
+; RV64-NEXT:    bgez a1, .LBB8_159
+; RV64-NEXT:    j .LBB8_411
+; RV64-NEXT:  .LBB8_159: # %else443
+; RV64-NEXT:    slli a1, a2, 42
+; RV64-NEXT:    bgez a1, .LBB8_160
+; RV64-NEXT:    j .LBB8_412
+; RV64-NEXT:  .LBB8_160: # %else446
+; RV64-NEXT:    slli a1, a2, 41
+; RV64-NEXT:    bgez a1, .LBB8_161
+; RV64-NEXT:    j .LBB8_413
+; RV64-NEXT:  .LBB8_161: # %else449
+; RV64-NEXT:    slli a1, a2, 40
+; RV64-NEXT:    bgez a1, .LBB8_162
+; RV64-NEXT:    j .LBB8_414
+; RV64-NEXT:  .LBB8_162: # %else452
+; RV64-NEXT:    slli a1, a2, 39
+; RV64-NEXT:    bgez a1, .LBB8_163
+; RV64-NEXT:    j .LBB8_415
+; RV64-NEXT:  .LBB8_163: # %else455
+; RV64-NEXT:    slli a1, a2, 38
+; RV64-NEXT:    bgez a1, .LBB8_164
+; RV64-NEXT:    j .LBB8_416
+; RV64-NEXT:  .LBB8_164: # %else458
+; RV64-NEXT:    slli a1, a2, 37
+; RV64-NEXT:    bgez a1, .LBB8_165
+; RV64-NEXT:    j .LBB8_417
+; RV64-NEXT:  .LBB8_165: # %else461
+; RV64-NEXT:    slli a1, a2, 36
+; RV64-NEXT:    bgez a1, .LBB8_166
+; RV64-NEXT:    j .LBB8_418
+; RV64-NEXT:  .LBB8_166: # %else464
+; RV64-NEXT:    slli a1, a2, 35
+; RV64-NEXT:    bgez a1, .LBB8_167
+; RV64-NEXT:    j .LBB8_419
+; RV64-NEXT:  .LBB8_167: # %else467
+; RV64-NEXT:    slli a1, a2, 34
+; RV64-NEXT:    bgez a1, .LBB8_168
+; RV64-NEXT:    j .LBB8_420
+; RV64-NEXT:  .LBB8_168: # %else470
+; RV64-NEXT:    slli a1, a2, 33
+; RV64-NEXT:    bgez a1, .LBB8_169
+; RV64-NEXT:    j .LBB8_421
+; RV64-NEXT:  .LBB8_169: # %else473
+; RV64-NEXT:    slli a1, a2, 32
+; RV64-NEXT:    bgez a1, .LBB8_170
+; RV64-NEXT:    j .LBB8_422
+; RV64-NEXT:  .LBB8_170: # %else476
+; RV64-NEXT:    slli a1, a2, 31
+; RV64-NEXT:    bgez a1, .LBB8_171
+; RV64-NEXT:    j .LBB8_423
+; RV64-NEXT:  .LBB8_171: # %else479
+; RV64-NEXT:    slli a1, a2, 30
+; RV64-NEXT:    bgez a1, .LBB8_172
+; RV64-NEXT:    j .LBB8_424
+; RV64-NEXT:  .LBB8_172: # %else482
+; RV64-NEXT:    slli a1, a2, 29
+; RV64-NEXT:    bgez a1, .LBB8_173
+; RV64-NEXT:    j .LBB8_425
+; RV64-NEXT:  .LBB8_173: # %else485
+; RV64-NEXT:    slli a1, a2, 28
+; RV64-NEXT:    bgez a1, .LBB8_174
+; RV64-NEXT:    j .LBB8_426
+; RV64-NEXT:  .LBB8_174: # %else488
+; RV64-NEXT:    slli a1, a2, 27
+; RV64-NEXT:    bgez a1, .LBB8_175
+; RV64-NEXT:    j .LBB8_427
+; RV64-NEXT:  .LBB8_175: # %else491
+; RV64-NEXT:    slli a1, a2, 26
+; RV64-NEXT:    bgez a1, .LBB8_176
+; RV64-NEXT:    j .LBB8_428
+; RV64-NEXT:  .LBB8_176: # %else494
+; RV64-NEXT:    slli a1, a2, 25
+; RV64-NEXT:    bgez a1, .LBB8_177
+; RV64-NEXT:    j .LBB8_429
+; RV64-NEXT:  .LBB8_177: # %else497
+; RV64-NEXT:    slli a1, a2, 24
+; RV64-NEXT:    bgez a1, .LBB8_178
+; RV64-NEXT:    j .LBB8_430
+; RV64-NEXT:  .LBB8_178: # %else500
+; RV64-NEXT:    slli a1, a2, 23
+; RV64-NEXT:    bgez a1, .LBB8_179
+; RV64-NEXT:    j .LBB8_431
+; RV64-NEXT:  .LBB8_179: # %else503
+; RV64-NEXT:    slli a1, a2, 22
+; RV64-NEXT:    bgez a1, .LBB8_180
+; RV64-NEXT:    j .LBB8_432
+; RV64-NEXT:  .LBB8_180: # %else506
+; RV64-NEXT:    slli a1, a2, 21
+; RV64-NEXT:    bgez a1, .LBB8_181
+; RV64-NEXT:    j .LBB8_433
+; RV64-NEXT:  .LBB8_181: # %else509
+; RV64-NEXT:    slli a1, a2, 20
+; RV64-NEXT:    bgez a1, .LBB8_182
+; RV64-NEXT:    j .LBB8_434
+; RV64-NEXT:  .LBB8_182: # %else512
+; RV64-NEXT:    slli a1, a2, 19
+; RV64-NEXT:    bgez a1, .LBB8_183
+; RV64-NEXT:    j .LBB8_435
+; RV64-NEXT:  .LBB8_183: # %else515
+; RV64-NEXT:    slli a1, a2, 18
+; RV64-NEXT:    bgez a1, .LBB8_185
+; RV64-NEXT:  .LBB8_184: # %cond.store517
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -1664
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 0(a3)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:  .LBB8_185: # %else518
+; RV64-NEXT:    slli a3, a2, 17
+; RV64-NEXT:    lui a1, 2
+; RV64-NEXT:    addiw a1, a1, 318
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    bgez a3, .LBB8_186
+; RV64-NEXT:    j .LBB8_436
+; RV64-NEXT:  .LBB8_186: # %else521
+; RV64-NEXT:    slli a3, a2, 16
+; RV64-NEXT:    bgez a3, .LBB8_187
+; RV64-NEXT:    j .LBB8_437
+; RV64-NEXT:  .LBB8_187: # %else524
+; RV64-NEXT:    slli a3, a2, 15
+; RV64-NEXT:    bgez a3, .LBB8_188
+; RV64-NEXT:    j .LBB8_438
+; RV64-NEXT:  .LBB8_188: # %else527
+; RV64-NEXT:    slli a3, a2, 14
+; RV64-NEXT:    bgez a3, .LBB8_189
+; RV64-NEXT:    j .LBB8_439
+; RV64-NEXT:  .LBB8_189: # %else530
+; RV64-NEXT:    slli a3, a2, 13
+; RV64-NEXT:    bgez a3, .LBB8_190
+; RV64-NEXT:    j .LBB8_440
+; RV64-NEXT:  .LBB8_190: # %else533
+; RV64-NEXT:    slli a3, a2, 12
+; RV64-NEXT:    bgez a3, .LBB8_191
+; RV64-NEXT:    j .LBB8_441
+; RV64-NEXT:  .LBB8_191: # %else536
+; RV64-NEXT:    slli a3, a2, 11
+; RV64-NEXT:    bgez a3, .LBB8_192
+; RV64-NEXT:    j .LBB8_442
+; RV64-NEXT:  .LBB8_192: # %else539
+; RV64-NEXT:    slli a3, a2, 10
+; RV64-NEXT:    bgez a3, .LBB8_193
+; RV64-NEXT:    j .LBB8_443
+; RV64-NEXT:  .LBB8_193: # %else542
+; RV64-NEXT:    slli a3, a2, 9
+; RV64-NEXT:    bgez a3, .LBB8_194
+; RV64-NEXT:    j .LBB8_444
+; RV64-NEXT:  .LBB8_194: # %else545
+; RV64-NEXT:    slli a3, a2, 8
+; RV64-NEXT:    bgez a3, .LBB8_195
+; RV64-NEXT:    j .LBB8_445
+; RV64-NEXT:  .LBB8_195: # %else548
+; RV64-NEXT:    slli a3, a2, 7
+; RV64-NEXT:    bgez a3, .LBB8_196
+; RV64-NEXT:    j .LBB8_446
+; RV64-NEXT:  .LBB8_196: # %else551
+; RV64-NEXT:    slli a3, a2, 6
+; RV64-NEXT:    bgez a3, .LBB8_197
+; RV64-NEXT:    j .LBB8_447
+; RV64-NEXT:  .LBB8_197: # %else554
+; RV64-NEXT:    slli a3, a2, 5
+; RV64-NEXT:    bgez a3, .LBB8_198
+; RV64-NEXT:    j .LBB8_448
+; RV64-NEXT:  .LBB8_198: # %else557
+; RV64-NEXT:    slli a3, a2, 4
+; RV64-NEXT:    bgez a3, .LBB8_199
+; RV64-NEXT:    j .LBB8_449
+; RV64-NEXT:  .LBB8_199: # %else560
+; RV64-NEXT:    slli a3, a2, 3
+; RV64-NEXT:    bgez a3, .LBB8_200
+; RV64-NEXT:    j .LBB8_450
+; RV64-NEXT:  .LBB8_200: # %else563
+; RV64-NEXT:    slli a3, a2, 2
+; RV64-NEXT:    bgez a3, .LBB8_202
+; RV64-NEXT:  .LBB8_201: # %cond.store565
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 384
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 127(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:  .LBB8_202: # %else566
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    slli a3, a2, 1
+; RV64-NEXT:    vslidedown.vi v8, v8, 1
+; RV64-NEXT:    bgez a3, .LBB8_204
+; RV64-NEXT:  # %bb.203: # %cond.store568
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 256
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 0(a1)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:  .LBB8_204: # %else569
+; RV64-NEXT:    lui a1, 2
+; RV64-NEXT:    addiw a1, a1, -1841
+; RV64-NEXT:    add a3, sp, a1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a1, v8
+; RV64-NEXT:    bgez a2, .LBB8_205
+; RV64-NEXT:    j .LBB8_451
+; RV64-NEXT:  .LBB8_205: # %else572
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    beqz a2, .LBB8_206
+; RV64-NEXT:    j .LBB8_452
+; RV64-NEXT:  .LBB8_206: # %else575
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    beqz a2, .LBB8_207
+; RV64-NEXT:    j .LBB8_453
+; RV64-NEXT:  .LBB8_207: # %else578
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    beqz a2, .LBB8_208
+; RV64-NEXT:    j .LBB8_454
+; RV64-NEXT:  .LBB8_208: # %else581
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    beqz a2, .LBB8_209
+; RV64-NEXT:    j .LBB8_455
+; RV64-NEXT:  .LBB8_209: # %else584
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    beqz a2, .LBB8_210
+; RV64-NEXT:    j .LBB8_456
+; RV64-NEXT:  .LBB8_210: # %else587
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    beqz a2, .LBB8_211
+; RV64-NEXT:    j .LBB8_457
+; RV64-NEXT:  .LBB8_211: # %else590
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    beqz a2, .LBB8_212
+; RV64-NEXT:    j .LBB8_458
+; RV64-NEXT:  .LBB8_212: # %else593
+; RV64-NEXT:    andi a2, a1, 128
+; RV64-NEXT:    beqz a2, .LBB8_213
+; RV64-NEXT:    j .LBB8_459
+; RV64-NEXT:  .LBB8_213: # %else596
+; RV64-NEXT:    andi a2, a1, 256
+; RV64-NEXT:    beqz a2, .LBB8_214
+; RV64-NEXT:    j .LBB8_460
+; RV64-NEXT:  .LBB8_214: # %else599
+; RV64-NEXT:    andi a2, a1, 512
+; RV64-NEXT:    beqz a2, .LBB8_215
+; RV64-NEXT:    j .LBB8_461
+; RV64-NEXT:  .LBB8_215: # %else602
+; RV64-NEXT:    andi a2, a1, 1024
+; RV64-NEXT:    beqz a2, .LBB8_216
+; RV64-NEXT:    j .LBB8_462
+; RV64-NEXT:  .LBB8_216: # %else605
+; RV64-NEXT:    slli a2, a1, 52
+; RV64-NEXT:    bgez a2, .LBB8_217
+; RV64-NEXT:    j .LBB8_463
+; RV64-NEXT:  .LBB8_217: # %else608
+; RV64-NEXT:    slli a2, a1, 51
+; RV64-NEXT:    bgez a2, .LBB8_218
+; RV64-NEXT:    j .LBB8_464
+; RV64-NEXT:  .LBB8_218: # %else611
+; RV64-NEXT:    slli a2, a1, 50
+; RV64-NEXT:    bgez a2, .LBB8_219
+; RV64-NEXT:    j .LBB8_465
+; RV64-NEXT:  .LBB8_219: # %else614
+; RV64-NEXT:    slli a2, a1, 49
+; RV64-NEXT:    bgez a2, .LBB8_220
+; RV64-NEXT:    j .LBB8_466
+; RV64-NEXT:  .LBB8_220: # %else617
+; RV64-NEXT:    slli a2, a1, 48
+; RV64-NEXT:    bgez a2, .LBB8_222
+; RV64-NEXT:  .LBB8_221: # %cond.store619
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, -1920
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 0(a3)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:  .LBB8_222: # %else620
+; RV64-NEXT:    slli a3, a1, 47
+; RV64-NEXT:    lui a2, 1
+; RV64-NEXT:    addiw a2, a2, 96
+; RV64-NEXT:    add a2, sp, a2
+; RV64-NEXT:    bgez a3, .LBB8_223
+; RV64-NEXT:    j .LBB8_467
+; RV64-NEXT:  .LBB8_223: # %else623
+; RV64-NEXT:    slli a3, a1, 46
+; RV64-NEXT:    bgez a3, .LBB8_224
+; RV64-NEXT:    j .LBB8_468
+; RV64-NEXT:  .LBB8_224: # %else626
+; RV64-NEXT:    slli a3, a1, 45
+; RV64-NEXT:    bgez a3, .LBB8_225
+; RV64-NEXT:    j .LBB8_469
+; RV64-NEXT:  .LBB8_225: # %else629
+; RV64-NEXT:    slli a3, a1, 44
+; RV64-NEXT:    bgez a3, .LBB8_226
+; RV64-NEXT:    j .LBB8_470
+; RV64-NEXT:  .LBB8_226: # %else632
+; RV64-NEXT:    slli a3, a1, 43
+; RV64-NEXT:    bgez a3, .LBB8_227
+; RV64-NEXT:    j .LBB8_471
+; RV64-NEXT:  .LBB8_227: # %else635
+; RV64-NEXT:    slli a3, a1, 42
+; RV64-NEXT:    bgez a3, .LBB8_228
+; RV64-NEXT:    j .LBB8_472
+; RV64-NEXT:  .LBB8_228: # %else638
+; RV64-NEXT:    slli a3, a1, 41
+; RV64-NEXT:    bgez a3, .LBB8_229
+; RV64-NEXT:    j .LBB8_473
+; RV64-NEXT:  .LBB8_229: # %else641
+; RV64-NEXT:    slli a3, a1, 40
+; RV64-NEXT:    bgez a3, .LBB8_230
+; RV64-NEXT:    j .LBB8_474
+; RV64-NEXT:  .LBB8_230: # %else644
+; RV64-NEXT:    slli a3, a1, 39
+; RV64-NEXT:    bgez a3, .LBB8_231
+; RV64-NEXT:    j .LBB8_475
+; RV64-NEXT:  .LBB8_231: # %else647
+; RV64-NEXT:    slli a3, a1, 38
+; RV64-NEXT:    bgez a3, .LBB8_232
+; RV64-NEXT:    j .LBB8_476
+; RV64-NEXT:  .LBB8_232: # %else650
+; RV64-NEXT:    slli a3, a1, 37
+; RV64-NEXT:    bgez a3, .LBB8_233
+; RV64-NEXT:    j .LBB8_477
+; RV64-NEXT:  .LBB8_233: # %else653
+; RV64-NEXT:    slli a3, a1, 36
+; RV64-NEXT:    bgez a3, .LBB8_234
+; RV64-NEXT:    j .LBB8_478
+; RV64-NEXT:  .LBB8_234: # %else656
+; RV64-NEXT:    slli a3, a1, 35
+; RV64-NEXT:    bgez a3, .LBB8_235
+; RV64-NEXT:    j .LBB8_479
+; RV64-NEXT:  .LBB8_235: # %else659
+; RV64-NEXT:    slli a3, a1, 34
+; RV64-NEXT:    bgez a3, .LBB8_236
+; RV64-NEXT:    j .LBB8_480
+; RV64-NEXT:  .LBB8_236: # %else662
+; RV64-NEXT:    slli a3, a1, 33
+; RV64-NEXT:    bgez a3, .LBB8_237
+; RV64-NEXT:    j .LBB8_481
+; RV64-NEXT:  .LBB8_237: # %else665
+; RV64-NEXT:    slli a3, a1, 32
+; RV64-NEXT:    bgez a3, .LBB8_238
+; RV64-NEXT:    j .LBB8_482
+; RV64-NEXT:  .LBB8_238: # %else668
+; RV64-NEXT:    slli a3, a1, 31
+; RV64-NEXT:    bgez a3, .LBB8_240
+; RV64-NEXT:  .LBB8_239: # %cond.store670
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 1
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 0(a2)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:  .LBB8_240: # %else671
+; RV64-NEXT:    slli a3, a1, 30
+; RV64-NEXT:    addi a2, sp, 2033
+; RV64-NEXT:    bgez a3, .LBB8_241
+; RV64-NEXT:    j .LBB8_483
+; RV64-NEXT:  .LBB8_241: # %else674
+; RV64-NEXT:    slli a3, a1, 29
+; RV64-NEXT:    bgez a3, .LBB8_242
+; RV64-NEXT:    j .LBB8_484
+; RV64-NEXT:  .LBB8_242: # %else677
+; RV64-NEXT:    slli a3, a1, 28
+; RV64-NEXT:    bgez a3, .LBB8_243
+; RV64-NEXT:    j .LBB8_485
+; RV64-NEXT:  .LBB8_243: # %else680
+; RV64-NEXT:    slli a3, a1, 27
+; RV64-NEXT:    bgez a3, .LBB8_244
+; RV64-NEXT:    j .LBB8_486
+; RV64-NEXT:  .LBB8_244: # %else683
+; RV64-NEXT:    slli a3, a1, 26
+; RV64-NEXT:    bgez a3, .LBB8_245
+; RV64-NEXT:    j .LBB8_487
+; RV64-NEXT:  .LBB8_245: # %else686
+; RV64-NEXT:    slli a3, a1, 25
+; RV64-NEXT:    bgez a3, .LBB8_246
+; RV64-NEXT:    j .LBB8_488
+; RV64-NEXT:  .LBB8_246: # %else689
+; RV64-NEXT:    slli a3, a1, 24
+; RV64-NEXT:    bgez a3, .LBB8_247
+; RV64-NEXT:    j .LBB8_489
+; RV64-NEXT:  .LBB8_247: # %else692
+; RV64-NEXT:    slli a3, a1, 23
+; RV64-NEXT:    bgez a3, .LBB8_248
+; RV64-NEXT:    j .LBB8_490
+; RV64-NEXT:  .LBB8_248: # %else695
+; RV64-NEXT:    slli a3, a1, 22
+; RV64-NEXT:    bgez a3, .LBB8_249
+; RV64-NEXT:    j .LBB8_491
+; RV64-NEXT:  .LBB8_249: # %else698
+; RV64-NEXT:    slli a3, a1, 21
+; RV64-NEXT:    bgez a3, .LBB8_250
+; RV64-NEXT:    j .LBB8_492
+; RV64-NEXT:  .LBB8_250: # %else701
+; RV64-NEXT:    slli a3, a1, 20
+; RV64-NEXT:    bgez a3, .LBB8_251
+; RV64-NEXT:    j .LBB8_493
+; RV64-NEXT:  .LBB8_251: # %else704
+; RV64-NEXT:    slli a3, a1, 19
+; RV64-NEXT:    bgez a3, .LBB8_252
+; RV64-NEXT:    j .LBB8_494
+; RV64-NEXT:  .LBB8_252: # %else707
+; RV64-NEXT:    slli a3, a1, 18
+; RV64-NEXT:    bgez a3, .LBB8_253
+; RV64-NEXT:    j .LBB8_495
+; RV64-NEXT:  .LBB8_253: # %else710
+; RV64-NEXT:    slli a3, a1, 17
+; RV64-NEXT:    bgez a3, .LBB8_254
+; RV64-NEXT:    j .LBB8_496
+; RV64-NEXT:  .LBB8_254: # %else713
+; RV64-NEXT:    slli a3, a1, 16
+; RV64-NEXT:    bgez a3, .LBB8_255
+; RV64-NEXT:    j .LBB8_497
+; RV64-NEXT:  .LBB8_255: # %else716
+; RV64-NEXT:    slli a3, a1, 15
+; RV64-NEXT:    bgez a3, .LBB8_256
+; RV64-NEXT:    j .LBB8_498
+; RV64-NEXT:  .LBB8_256: # %else719
+; RV64-NEXT:    slli a3, a1, 14
+; RV64-NEXT:    bgez a3, .LBB8_257
+; RV64-NEXT:    j .LBB8_499
+; RV64-NEXT:  .LBB8_257: # %else722
+; RV64-NEXT:    slli a2, a1, 13
+; RV64-NEXT:    bgez a2, .LBB8_258
+; RV64-NEXT:    j .LBB8_500
+; RV64-NEXT:  .LBB8_258: # %else725
+; RV64-NEXT:    slli a2, a1, 12
+; RV64-NEXT:    bgez a2, .LBB8_259
+; RV64-NEXT:    j .LBB8_501
+; RV64-NEXT:  .LBB8_259: # %else728
+; RV64-NEXT:    slli a2, a1, 11
+; RV64-NEXT:    bgez a2, .LBB8_260
+; RV64-NEXT:    j .LBB8_502
+; RV64-NEXT:  .LBB8_260: # %else731
+; RV64-NEXT:    slli a2, a1, 10
+; RV64-NEXT:    bgez a2, .LBB8_261
+; RV64-NEXT:    j .LBB8_503
+; RV64-NEXT:  .LBB8_261: # %else734
+; RV64-NEXT:    slli a2, a1, 9
+; RV64-NEXT:    bgez a2, .LBB8_262
+; RV64-NEXT:    j .LBB8_504
+; RV64-NEXT:  .LBB8_262: # %else737
+; RV64-NEXT:    slli a2, a1, 8
+; RV64-NEXT:    bgez a2, .LBB8_263
+; RV64-NEXT:    j .LBB8_505
+; RV64-NEXT:  .LBB8_263: # %else740
+; RV64-NEXT:    slli a2, a1, 7
+; RV64-NEXT:    bgez a2, .LBB8_264
+; RV64-NEXT:    j .LBB8_506
+; RV64-NEXT:  .LBB8_264: # %else743
+; RV64-NEXT:    slli a2, a1, 6
+; RV64-NEXT:    bgez a2, .LBB8_265
+; RV64-NEXT:    j .LBB8_507
+; RV64-NEXT:  .LBB8_265: # %else746
+; RV64-NEXT:    slli a2, a1, 5
+; RV64-NEXT:    bgez a2, .LBB8_266
+; RV64-NEXT:    j .LBB8_508
+; RV64-NEXT:  .LBB8_266: # %else749
+; RV64-NEXT:    slli a2, a1, 4
+; RV64-NEXT:    bgez a2, .LBB8_267
+; RV64-NEXT:    j .LBB8_509
+; RV64-NEXT:  .LBB8_267: # %else752
+; RV64-NEXT:    slli a2, a1, 3
+; RV64-NEXT:    bgez a2, .LBB8_268
+; RV64-NEXT:    j .LBB8_510
+; RV64-NEXT:  .LBB8_268: # %else755
+; RV64-NEXT:    slli a2, a1, 2
+; RV64-NEXT:    bgez a2, .LBB8_269
+; RV64-NEXT:    j .LBB8_511
+; RV64-NEXT:  .LBB8_269: # %else758
+; RV64-NEXT:    slli a2, a1, 1
+; RV64-NEXT:    bgez a2, .LBB8_270
+; RV64-NEXT:    j .LBB8_512
+; RV64-NEXT:  .LBB8_270: # %else761
+; RV64-NEXT:    bgez a1, .LBB8_272
+; RV64-NEXT:  .LBB8_271: # %cond.store763
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    addi a2, sp, 128
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a2)
+; RV64-NEXT:    lbu a1, 255(sp)
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:  .LBB8_272: # %else764
+; RV64-NEXT:    lui a0, 6
+; RV64-NEXT:    addiw a0, a0, 256
+; RV64-NEXT:    sub sp, s0, a0
+; RV64-NEXT:    lui a0, 6
+; RV64-NEXT:    addiw a0, a0, -1776
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    ld ra, 2024(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 2016(sp) # 8-byte Folded Reload
+; RV64-NEXT:    addi sp, sp, 2032
+; RV64-NEXT:    ret
+; RV64-NEXT:  .LBB8_273: # %cond.store
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v16, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    andi a1, a2, 2
+; RV64-NEXT:    beqz a1, .LBB8_2
+; RV64-NEXT:  .LBB8_274: # %cond.store1
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    andi a1, a2, 4
+; RV64-NEXT:    beqz a1, .LBB8_3
+; RV64-NEXT:  .LBB8_275: # %cond.store4
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 2
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    andi a1, a2, 8
+; RV64-NEXT:    beqz a1, .LBB8_4
+; RV64-NEXT:  .LBB8_276: # %cond.store7
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 3
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    andi a1, a2, 16
+; RV64-NEXT:    beqz a1, .LBB8_5
+; RV64-NEXT:  .LBB8_277: # %cond.store10
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 4
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    andi a1, a2, 32
+; RV64-NEXT:    beqz a1, .LBB8_6
+; RV64-NEXT:  .LBB8_278: # %cond.store13
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 5
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    andi a1, a2, 64
+; RV64-NEXT:    beqz a1, .LBB8_7
+; RV64-NEXT:  .LBB8_279: # %cond.store16
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 6
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    andi a1, a2, 128
+; RV64-NEXT:    beqz a1, .LBB8_8
+; RV64-NEXT:  .LBB8_280: # %cond.store19
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 7
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    andi a1, a2, 256
+; RV64-NEXT:    beqz a1, .LBB8_9
+; RV64-NEXT:  .LBB8_281: # %cond.store22
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 8
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    andi a1, a2, 512
+; RV64-NEXT:    beqz a1, .LBB8_10
+; RV64-NEXT:  .LBB8_282: # %cond.store25
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 9
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    andi a1, a2, 1024
+; RV64-NEXT:    beqz a1, .LBB8_11
+; RV64-NEXT:  .LBB8_283: # %cond.store28
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 10
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 52
+; RV64-NEXT:    bgez a1, .LBB8_12
+; RV64-NEXT:  .LBB8_284: # %cond.store31
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 11
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 51
+; RV64-NEXT:    bgez a1, .LBB8_13
+; RV64-NEXT:  .LBB8_285: # %cond.store34
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 12
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 50
+; RV64-NEXT:    bgez a1, .LBB8_14
+; RV64-NEXT:  .LBB8_286: # %cond.store37
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 13
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 49
+; RV64-NEXT:    bgez a1, .LBB8_15
+; RV64-NEXT:  .LBB8_287: # %cond.store40
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 14
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 48
+; RV64-NEXT:    bgez a1, .LBB8_16
+; RV64-NEXT:  .LBB8_288: # %cond.store43
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v16, 15
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 47
+; RV64-NEXT:    bgez a1, .LBB8_17
+; RV64-NEXT:  .LBB8_289: # %cond.store46
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 16
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 46
+; RV64-NEXT:    bgez a1, .LBB8_18
+; RV64-NEXT:  .LBB8_290: # %cond.store49
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 17
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 45
+; RV64-NEXT:    bgez a1, .LBB8_19
+; RV64-NEXT:  .LBB8_291: # %cond.store52
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 18
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 44
+; RV64-NEXT:    bgez a1, .LBB8_20
+; RV64-NEXT:  .LBB8_292: # %cond.store55
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 19
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 43
+; RV64-NEXT:    bgez a1, .LBB8_21
+; RV64-NEXT:  .LBB8_293: # %cond.store58
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 20
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 42
+; RV64-NEXT:    bgez a1, .LBB8_22
+; RV64-NEXT:  .LBB8_294: # %cond.store61
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 21
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 41
+; RV64-NEXT:    bgez a1, .LBB8_23
+; RV64-NEXT:  .LBB8_295: # %cond.store64
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 22
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 40
+; RV64-NEXT:    bltz a1, .LBB8_296
+; RV64-NEXT:    j .LBB8_24
+; RV64-NEXT:  .LBB8_296: # %cond.store67
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 23
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 39
+; RV64-NEXT:    bltz a1, .LBB8_297
+; RV64-NEXT:    j .LBB8_25
+; RV64-NEXT:  .LBB8_297: # %cond.store70
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 24
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 38
+; RV64-NEXT:    bltz a1, .LBB8_298
+; RV64-NEXT:    j .LBB8_26
+; RV64-NEXT:  .LBB8_298: # %cond.store73
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 25
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 37
+; RV64-NEXT:    bltz a1, .LBB8_299
+; RV64-NEXT:    j .LBB8_27
+; RV64-NEXT:  .LBB8_299: # %cond.store76
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 26
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 36
+; RV64-NEXT:    bltz a1, .LBB8_300
+; RV64-NEXT:    j .LBB8_28
+; RV64-NEXT:  .LBB8_300: # %cond.store79
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 27
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 35
+; RV64-NEXT:    bltz a1, .LBB8_301
+; RV64-NEXT:    j .LBB8_29
+; RV64-NEXT:  .LBB8_301: # %cond.store82
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 28
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 34
+; RV64-NEXT:    bltz a1, .LBB8_302
+; RV64-NEXT:    j .LBB8_30
+; RV64-NEXT:  .LBB8_302: # %cond.store85
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 29
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 33
+; RV64-NEXT:    bltz a1, .LBB8_303
+; RV64-NEXT:    j .LBB8_31
+; RV64-NEXT:  .LBB8_303: # %cond.store88
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v16, 30
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a1, a2, 32
+; RV64-NEXT:    bgez a1, .LBB8_513
+; RV64-NEXT:    j .LBB8_32
+; RV64-NEXT:  .LBB8_513: # %cond.store88
+; RV64-NEXT:    j .LBB8_33
+; RV64-NEXT:  .LBB8_304: # %cond.store94
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1016(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 30
+; RV64-NEXT:    bltz a3, .LBB8_305
+; RV64-NEXT:    j .LBB8_35
+; RV64-NEXT:  .LBB8_305: # %cond.store97
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -128
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 889(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 29
+; RV64-NEXT:    bltz a3, .LBB8_306
+; RV64-NEXT:    j .LBB8_36
+; RV64-NEXT:  .LBB8_306: # %cond.store100
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -256
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 762(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 28
+; RV64-NEXT:    bltz a3, .LBB8_307
+; RV64-NEXT:    j .LBB8_37
+; RV64-NEXT:  .LBB8_307: # %cond.store103
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -384
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 635(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 27
+; RV64-NEXT:    bltz a3, .LBB8_308
+; RV64-NEXT:    j .LBB8_38
+; RV64-NEXT:  .LBB8_308: # %cond.store106
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -512
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 508(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 26
+; RV64-NEXT:    bltz a3, .LBB8_309
+; RV64-NEXT:    j .LBB8_39
+; RV64-NEXT:  .LBB8_309: # %cond.store109
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -640
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 381(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 25
+; RV64-NEXT:    bltz a3, .LBB8_310
+; RV64-NEXT:    j .LBB8_40
+; RV64-NEXT:  .LBB8_310: # %cond.store112
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -768
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 254(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 24
+; RV64-NEXT:    bltz a3, .LBB8_311
+; RV64-NEXT:    j .LBB8_41
+; RV64-NEXT:  .LBB8_311: # %cond.store115
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -896
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 127(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 23
+; RV64-NEXT:    bgez a3, .LBB8_514
+; RV64-NEXT:    j .LBB8_42
+; RV64-NEXT:  .LBB8_514: # %cond.store115
+; RV64-NEXT:    j .LBB8_43
+; RV64-NEXT:  .LBB8_312: # %cond.store121
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -1152
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 2032(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 21
+; RV64-NEXT:    bltz a3, .LBB8_313
+; RV64-NEXT:    j .LBB8_45
+; RV64-NEXT:  .LBB8_313: # %cond.store124
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -1280
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1905(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 20
+; RV64-NEXT:    bltz a3, .LBB8_314
+; RV64-NEXT:    j .LBB8_46
+; RV64-NEXT:  .LBB8_314: # %cond.store127
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -1408
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1778(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 19
+; RV64-NEXT:    bltz a3, .LBB8_315
+; RV64-NEXT:    j .LBB8_47
+; RV64-NEXT:  .LBB8_315: # %cond.store130
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -1536
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1651(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 18
+; RV64-NEXT:    bltz a3, .LBB8_316
+; RV64-NEXT:    j .LBB8_48
+; RV64-NEXT:  .LBB8_316: # %cond.store133
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -1664
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1524(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 17
+; RV64-NEXT:    bltz a3, .LBB8_317
+; RV64-NEXT:    j .LBB8_49
+; RV64-NEXT:  .LBB8_317: # %cond.store136
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -1792
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1397(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 16
+; RV64-NEXT:    bltz a3, .LBB8_318
+; RV64-NEXT:    j .LBB8_50
+; RV64-NEXT:  .LBB8_318: # %cond.store139
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 6
+; RV64-NEXT:    addiw a4, a4, -1920
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1270(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 15
+; RV64-NEXT:    bltz a3, .LBB8_319
+; RV64-NEXT:    j .LBB8_51
+; RV64-NEXT:  .LBB8_319: # %cond.store142
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 11
+; RV64-NEXT:    slli a4, a4, 11
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1143(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 14
+; RV64-NEXT:    bltz a3, .LBB8_320
+; RV64-NEXT:    j .LBB8_52
+; RV64-NEXT:  .LBB8_320: # %cond.store145
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 1920
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1016(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 13
+; RV64-NEXT:    bltz a3, .LBB8_321
+; RV64-NEXT:    j .LBB8_53
+; RV64-NEXT:  .LBB8_321: # %cond.store148
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 1792
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 889(a1)
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    addi a0, a0, 1
+; RV64-NEXT:    slli a3, a2, 12
+; RV64-NEXT:    bltz a3, .LBB8_322
+; RV64-NEXT:    j .LBB8_54
+; RV64-NEXT:  .LBB8_322: # %cond.store151
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 1664
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 762(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 11
+; RV64-NEXT:    bltz a3, .LBB8_323
+; RV64-NEXT:    j .LBB8_55
+; RV64-NEXT:  .LBB8_323: # %cond.store154
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 1536
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 635(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 10
+; RV64-NEXT:    bltz a3, .LBB8_324
+; RV64-NEXT:    j .LBB8_56
+; RV64-NEXT:  .LBB8_324: # %cond.store157
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 1408
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 508(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 9
+; RV64-NEXT:    bltz a3, .LBB8_325
+; RV64-NEXT:    j .LBB8_57
+; RV64-NEXT:  .LBB8_325: # %cond.store160
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 1280
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 381(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 8
+; RV64-NEXT:    bltz a3, .LBB8_326
+; RV64-NEXT:    j .LBB8_58
+; RV64-NEXT:  .LBB8_326: # %cond.store163
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 1152
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 254(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 7
+; RV64-NEXT:    bltz a3, .LBB8_327
+; RV64-NEXT:    j .LBB8_59
+; RV64-NEXT:  .LBB8_327: # %cond.store166
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 21
+; RV64-NEXT:    slli a4, a4, 10
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 127(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 6
+; RV64-NEXT:    bgez a3, .LBB8_515
+; RV64-NEXT:    j .LBB8_60
+; RV64-NEXT:  .LBB8_515: # %cond.store166
+; RV64-NEXT:    j .LBB8_61
+; RV64-NEXT:  .LBB8_328: # %cond.store172
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 768
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a1, 2032(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 4
+; RV64-NEXT:    bltz a1, .LBB8_329
+; RV64-NEXT:    j .LBB8_63
+; RV64-NEXT:  .LBB8_329: # %cond.store175
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 640
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a1, 1905(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 3
+; RV64-NEXT:    bltz a1, .LBB8_330
+; RV64-NEXT:    j .LBB8_64
+; RV64-NEXT:  .LBB8_330: # %cond.store178
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 512
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a1, 1778(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 2
+; RV64-NEXT:    bgez a1, .LBB8_516
+; RV64-NEXT:    j .LBB8_65
+; RV64-NEXT:  .LBB8_516: # %cond.store178
+; RV64-NEXT:    j .LBB8_66
+; RV64-NEXT:  .LBB8_331: # %cond.store187
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, 128
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 1397(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB8_332
+; RV64-NEXT:    j .LBB8_70
+; RV64-NEXT:  .LBB8_332: # %cond.store190
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 1270(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB8_333
+; RV64-NEXT:    j .LBB8_71
+; RV64-NEXT:  .LBB8_333: # %cond.store193
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -128
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 1143(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB8_334
+; RV64-NEXT:    j .LBB8_72
+; RV64-NEXT:  .LBB8_334: # %cond.store196
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -256
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 1016(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB8_335
+; RV64-NEXT:    j .LBB8_73
+; RV64-NEXT:  .LBB8_335: # %cond.store199
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -384
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 889(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB8_336
+; RV64-NEXT:    j .LBB8_74
+; RV64-NEXT:  .LBB8_336: # %cond.store202
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -512
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 762(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB8_337
+; RV64-NEXT:    j .LBB8_75
+; RV64-NEXT:  .LBB8_337: # %cond.store205
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -640
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 635(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB8_338
+; RV64-NEXT:    j .LBB8_76
+; RV64-NEXT:  .LBB8_338: # %cond.store208
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -768
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 508(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 128
+; RV64-NEXT:    bnez a2, .LBB8_339
+; RV64-NEXT:    j .LBB8_77
+; RV64-NEXT:  .LBB8_339: # %cond.store211
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -896
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 381(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 256
+; RV64-NEXT:    bnez a2, .LBB8_340
+; RV64-NEXT:    j .LBB8_78
+; RV64-NEXT:  .LBB8_340: # %cond.store214
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    li a4, 19
+; RV64-NEXT:    slli a4, a4, 10
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 254(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 512
+; RV64-NEXT:    bnez a2, .LBB8_341
+; RV64-NEXT:    j .LBB8_79
+; RV64-NEXT:  .LBB8_341: # %cond.store217
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -1152
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a2, 127(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 1024
+; RV64-NEXT:    beqz a2, .LBB8_517
+; RV64-NEXT:    j .LBB8_80
+; RV64-NEXT:  .LBB8_517: # %cond.store217
+; RV64-NEXT:    j .LBB8_81
+; RV64-NEXT:  .LBB8_342: # %cond.store223
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -1408
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 2032(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 51
+; RV64-NEXT:    bltz a3, .LBB8_343
+; RV64-NEXT:    j .LBB8_83
+; RV64-NEXT:  .LBB8_343: # %cond.store226
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -1536
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1905(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 50
+; RV64-NEXT:    bltz a3, .LBB8_344
+; RV64-NEXT:    j .LBB8_84
+; RV64-NEXT:  .LBB8_344: # %cond.store229
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -1664
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1778(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 49
+; RV64-NEXT:    bltz a3, .LBB8_345
+; RV64-NEXT:    j .LBB8_85
+; RV64-NEXT:  .LBB8_345: # %cond.store232
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -1792
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1651(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 48
+; RV64-NEXT:    bltz a3, .LBB8_346
+; RV64-NEXT:    j .LBB8_86
+; RV64-NEXT:  .LBB8_346: # %cond.store235
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 5
+; RV64-NEXT:    addiw a4, a4, -1920
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1524(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 47
+; RV64-NEXT:    bltz a3, .LBB8_347
+; RV64-NEXT:    j .LBB8_87
+; RV64-NEXT:  .LBB8_347: # %cond.store238
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 9
+; RV64-NEXT:    slli a4, a4, 11
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1397(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 46
+; RV64-NEXT:    bltz a3, .LBB8_348
+; RV64-NEXT:    j .LBB8_88
+; RV64-NEXT:  .LBB8_348: # %cond.store241
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 1920
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1270(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 45
+; RV64-NEXT:    bltz a3, .LBB8_349
+; RV64-NEXT:    j .LBB8_89
+; RV64-NEXT:  .LBB8_349: # %cond.store244
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 1792
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1143(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 44
+; RV64-NEXT:    bltz a3, .LBB8_350
+; RV64-NEXT:    j .LBB8_90
+; RV64-NEXT:  .LBB8_350: # %cond.store247
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 1664
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1016(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 43
+; RV64-NEXT:    bltz a3, .LBB8_351
+; RV64-NEXT:    j .LBB8_91
+; RV64-NEXT:  .LBB8_351: # %cond.store250
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 1536
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 889(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 42
+; RV64-NEXT:    bltz a3, .LBB8_352
+; RV64-NEXT:    j .LBB8_92
+; RV64-NEXT:  .LBB8_352: # %cond.store253
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 1408
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 762(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 41
+; RV64-NEXT:    bltz a3, .LBB8_353
+; RV64-NEXT:    j .LBB8_93
+; RV64-NEXT:  .LBB8_353: # %cond.store256
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 1280
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 635(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 40
+; RV64-NEXT:    bltz a3, .LBB8_354
+; RV64-NEXT:    j .LBB8_94
+; RV64-NEXT:  .LBB8_354: # %cond.store259
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 1152
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 508(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 39
+; RV64-NEXT:    bltz a3, .LBB8_355
+; RV64-NEXT:    j .LBB8_95
+; RV64-NEXT:  .LBB8_355: # %cond.store262
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 17
+; RV64-NEXT:    slli a4, a4, 10
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 381(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 38
+; RV64-NEXT:    bltz a3, .LBB8_356
+; RV64-NEXT:    j .LBB8_96
+; RV64-NEXT:  .LBB8_356: # %cond.store265
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 896
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 254(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 37
+; RV64-NEXT:    bltz a3, .LBB8_357
+; RV64-NEXT:    j .LBB8_97
+; RV64-NEXT:  .LBB8_357: # %cond.store268
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 768
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 127(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 36
+; RV64-NEXT:    bgez a3, .LBB8_518
+; RV64-NEXT:    j .LBB8_98
+; RV64-NEXT:  .LBB8_518: # %cond.store268
+; RV64-NEXT:    j .LBB8_99
+; RV64-NEXT:  .LBB8_358: # %cond.store274
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 512
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 2032(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 34
+; RV64-NEXT:    bltz a3, .LBB8_359
+; RV64-NEXT:    j .LBB8_101
+; RV64-NEXT:  .LBB8_359: # %cond.store277
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 384
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1905(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 33
+; RV64-NEXT:    bltz a3, .LBB8_360
+; RV64-NEXT:    j .LBB8_102
+; RV64-NEXT:  .LBB8_360: # %cond.store280
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 256
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1778(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 32
+; RV64-NEXT:    bltz a3, .LBB8_361
+; RV64-NEXT:    j .LBB8_103
+; RV64-NEXT:  .LBB8_361: # %cond.store283
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, 128
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1651(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 31
+; RV64-NEXT:    bltz a3, .LBB8_362
+; RV64-NEXT:    j .LBB8_104
+; RV64-NEXT:  .LBB8_362: # %cond.store286
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1524(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 30
+; RV64-NEXT:    bltz a3, .LBB8_363
+; RV64-NEXT:    j .LBB8_105
+; RV64-NEXT:  .LBB8_363: # %cond.store289
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -128
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1397(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 29
+; RV64-NEXT:    bltz a3, .LBB8_364
+; RV64-NEXT:    j .LBB8_106
+; RV64-NEXT:  .LBB8_364: # %cond.store292
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -256
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1270(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 28
+; RV64-NEXT:    bltz a3, .LBB8_365
+; RV64-NEXT:    j .LBB8_107
+; RV64-NEXT:  .LBB8_365: # %cond.store295
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -384
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1143(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 27
+; RV64-NEXT:    bltz a3, .LBB8_366
+; RV64-NEXT:    j .LBB8_108
+; RV64-NEXT:  .LBB8_366: # %cond.store298
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 31
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1016(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 26
+; RV64-NEXT:    bltz a3, .LBB8_367
+; RV64-NEXT:    j .LBB8_109
+; RV64-NEXT:  .LBB8_367: # %cond.store301
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -640
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 889(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 25
+; RV64-NEXT:    bltz a3, .LBB8_368
+; RV64-NEXT:    j .LBB8_110
+; RV64-NEXT:  .LBB8_368: # %cond.store304
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -768
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 762(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 24
+; RV64-NEXT:    bltz a3, .LBB8_369
+; RV64-NEXT:    j .LBB8_111
+; RV64-NEXT:  .LBB8_369: # %cond.store307
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -896
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 635(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 23
+; RV64-NEXT:    bltz a3, .LBB8_370
+; RV64-NEXT:    j .LBB8_112
+; RV64-NEXT:  .LBB8_370: # %cond.store310
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 15
+; RV64-NEXT:    slli a4, a4, 10
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 508(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 22
+; RV64-NEXT:    bltz a3, .LBB8_371
+; RV64-NEXT:    j .LBB8_113
+; RV64-NEXT:  .LBB8_371: # %cond.store313
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -1152
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 381(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 21
+; RV64-NEXT:    bltz a3, .LBB8_372
+; RV64-NEXT:    j .LBB8_114
+; RV64-NEXT:  .LBB8_372: # %cond.store316
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -1280
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 254(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 20
+; RV64-NEXT:    bltz a3, .LBB8_373
+; RV64-NEXT:    j .LBB8_115
+; RV64-NEXT:  .LBB8_373: # %cond.store319
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -1408
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 127(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 19
+; RV64-NEXT:    bgez a3, .LBB8_519
+; RV64-NEXT:    j .LBB8_116
+; RV64-NEXT:  .LBB8_519: # %cond.store319
+; RV64-NEXT:    j .LBB8_117
+; RV64-NEXT:  .LBB8_374: # %cond.store325
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -1664
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 2032(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 17
+; RV64-NEXT:    bltz a3, .LBB8_375
+; RV64-NEXT:    j .LBB8_119
+; RV64-NEXT:  .LBB8_375: # %cond.store328
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -1792
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1905(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 16
+; RV64-NEXT:    bltz a3, .LBB8_376
+; RV64-NEXT:    j .LBB8_120
+; RV64-NEXT:  .LBB8_376: # %cond.store331
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 4
+; RV64-NEXT:    addiw a4, a4, -1920
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1778(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 15
+; RV64-NEXT:    bltz a3, .LBB8_377
+; RV64-NEXT:    j .LBB8_121
+; RV64-NEXT:  .LBB8_377: # %cond.store334
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 7
+; RV64-NEXT:    slli a4, a4, 11
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1651(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 14
+; RV64-NEXT:    bltz a3, .LBB8_378
+; RV64-NEXT:    j .LBB8_122
+; RV64-NEXT:  .LBB8_378: # %cond.store337
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 1920
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1524(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 13
+; RV64-NEXT:    bltz a3, .LBB8_379
+; RV64-NEXT:    j .LBB8_123
+; RV64-NEXT:  .LBB8_379: # %cond.store340
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 1792
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1397(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 12
+; RV64-NEXT:    bltz a3, .LBB8_380
+; RV64-NEXT:    j .LBB8_124
+; RV64-NEXT:  .LBB8_380: # %cond.store343
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 1664
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1270(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 11
+; RV64-NEXT:    bltz a3, .LBB8_381
+; RV64-NEXT:    j .LBB8_125
+; RV64-NEXT:  .LBB8_381: # %cond.store346
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 27
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1143(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 10
+; RV64-NEXT:    bltz a3, .LBB8_382
+; RV64-NEXT:    j .LBB8_126
+; RV64-NEXT:  .LBB8_382: # %cond.store349
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 1408
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 1016(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 9
+; RV64-NEXT:    bltz a3, .LBB8_383
+; RV64-NEXT:    j .LBB8_127
+; RV64-NEXT:  .LBB8_383: # %cond.store352
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 1280
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 889(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 8
+; RV64-NEXT:    bltz a3, .LBB8_384
+; RV64-NEXT:    j .LBB8_128
+; RV64-NEXT:  .LBB8_384: # %cond.store355
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 1152
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 762(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 7
+; RV64-NEXT:    bltz a3, .LBB8_385
+; RV64-NEXT:    j .LBB8_129
+; RV64-NEXT:  .LBB8_385: # %cond.store358
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 13
+; RV64-NEXT:    slli a4, a4, 10
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 635(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 6
+; RV64-NEXT:    bltz a3, .LBB8_386
+; RV64-NEXT:    j .LBB8_130
+; RV64-NEXT:  .LBB8_386: # %cond.store361
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 896
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 508(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 5
+; RV64-NEXT:    bltz a3, .LBB8_387
+; RV64-NEXT:    j .LBB8_131
+; RV64-NEXT:  .LBB8_387: # %cond.store364
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 768
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 381(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 4
+; RV64-NEXT:    bltz a3, .LBB8_388
+; RV64-NEXT:    j .LBB8_132
+; RV64-NEXT:  .LBB8_388: # %cond.store367
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 640
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 254(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 3
+; RV64-NEXT:    bltz a3, .LBB8_389
+; RV64-NEXT:    j .LBB8_133
+; RV64-NEXT:  .LBB8_389: # %cond.store370
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 25
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a3, 127(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 2
+; RV64-NEXT:    bgez a3, .LBB8_520
+; RV64-NEXT:    j .LBB8_134
+; RV64-NEXT:  .LBB8_520: # %cond.store370
+; RV64-NEXT:    j .LBB8_135
+; RV64-NEXT:  .LBB8_390: # %cond.store379
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, 128
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v16, (a4)
+; RV64-NEXT:    lbu a1, 1874(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a1, a2, 1
+; RV64-NEXT:    bnez a1, .LBB8_391
+; RV64-NEXT:    j .LBB8_139
+; RV64-NEXT:  .LBB8_391: # %cond.store382
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v24, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    andi a1, a2, 2
+; RV64-NEXT:    bnez a1, .LBB8_392
+; RV64-NEXT:    j .LBB8_140
+; RV64-NEXT:  .LBB8_392: # %cond.store385
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 1
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    andi a1, a2, 4
+; RV64-NEXT:    bnez a1, .LBB8_393
+; RV64-NEXT:    j .LBB8_141
+; RV64-NEXT:  .LBB8_393: # %cond.store388
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 2
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    andi a1, a2, 8
+; RV64-NEXT:    bnez a1, .LBB8_394
+; RV64-NEXT:    j .LBB8_142
+; RV64-NEXT:  .LBB8_394: # %cond.store391
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 3
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    andi a1, a2, 16
+; RV64-NEXT:    bnez a1, .LBB8_395
+; RV64-NEXT:    j .LBB8_143
+; RV64-NEXT:  .LBB8_395: # %cond.store394
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 4
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    andi a1, a2, 32
+; RV64-NEXT:    bnez a1, .LBB8_396
+; RV64-NEXT:    j .LBB8_144
+; RV64-NEXT:  .LBB8_396: # %cond.store397
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 5
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    andi a1, a2, 64
+; RV64-NEXT:    bnez a1, .LBB8_397
+; RV64-NEXT:    j .LBB8_145
+; RV64-NEXT:  .LBB8_397: # %cond.store400
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 6
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    andi a1, a2, 128
+; RV64-NEXT:    bnez a1, .LBB8_398
+; RV64-NEXT:    j .LBB8_146
+; RV64-NEXT:  .LBB8_398: # %cond.store403
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 7
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    andi a1, a2, 256
+; RV64-NEXT:    bnez a1, .LBB8_399
+; RV64-NEXT:    j .LBB8_147
+; RV64-NEXT:  .LBB8_399: # %cond.store406
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 8
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    andi a1, a2, 512
+; RV64-NEXT:    bnez a1, .LBB8_400
+; RV64-NEXT:    j .LBB8_148
+; RV64-NEXT:  .LBB8_400: # %cond.store409
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 9
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    andi a1, a2, 1024
+; RV64-NEXT:    bnez a1, .LBB8_401
+; RV64-NEXT:    j .LBB8_149
+; RV64-NEXT:  .LBB8_401: # %cond.store412
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 10
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 52
+; RV64-NEXT:    bltz a1, .LBB8_402
+; RV64-NEXT:    j .LBB8_150
+; RV64-NEXT:  .LBB8_402: # %cond.store415
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 11
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 51
+; RV64-NEXT:    bltz a1, .LBB8_403
+; RV64-NEXT:    j .LBB8_151
+; RV64-NEXT:  .LBB8_403: # %cond.store418
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 12
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 50
+; RV64-NEXT:    bltz a1, .LBB8_404
+; RV64-NEXT:    j .LBB8_152
+; RV64-NEXT:  .LBB8_404: # %cond.store421
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 13
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 49
+; RV64-NEXT:    bltz a1, .LBB8_405
+; RV64-NEXT:    j .LBB8_153
+; RV64-NEXT:  .LBB8_405: # %cond.store424
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 14
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 48
+; RV64-NEXT:    bltz a1, .LBB8_406
+; RV64-NEXT:    j .LBB8_154
+; RV64-NEXT:  .LBB8_406: # %cond.store427
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v9, v24, 15
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v9, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 47
+; RV64-NEXT:    bltz a1, .LBB8_407
+; RV64-NEXT:    j .LBB8_155
+; RV64-NEXT:  .LBB8_407: # %cond.store430
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 16
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 46
+; RV64-NEXT:    bltz a1, .LBB8_408
+; RV64-NEXT:    j .LBB8_156
+; RV64-NEXT:  .LBB8_408: # %cond.store433
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 17
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 45
+; RV64-NEXT:    bltz a1, .LBB8_409
+; RV64-NEXT:    j .LBB8_157
+; RV64-NEXT:  .LBB8_409: # %cond.store436
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 18
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 44
+; RV64-NEXT:    bltz a1, .LBB8_410
+; RV64-NEXT:    j .LBB8_158
+; RV64-NEXT:  .LBB8_410: # %cond.store439
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 19
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 43
+; RV64-NEXT:    bltz a1, .LBB8_411
+; RV64-NEXT:    j .LBB8_159
+; RV64-NEXT:  .LBB8_411: # %cond.store442
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 20
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 42
+; RV64-NEXT:    bltz a1, .LBB8_412
+; RV64-NEXT:    j .LBB8_160
+; RV64-NEXT:  .LBB8_412: # %cond.store445
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 21
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 41
+; RV64-NEXT:    bltz a1, .LBB8_413
+; RV64-NEXT:    j .LBB8_161
+; RV64-NEXT:  .LBB8_413: # %cond.store448
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 22
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 40
+; RV64-NEXT:    bltz a1, .LBB8_414
+; RV64-NEXT:    j .LBB8_162
+; RV64-NEXT:  .LBB8_414: # %cond.store451
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 23
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 39
+; RV64-NEXT:    bltz a1, .LBB8_415
+; RV64-NEXT:    j .LBB8_163
+; RV64-NEXT:  .LBB8_415: # %cond.store454
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 24
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 38
+; RV64-NEXT:    bltz a1, .LBB8_416
+; RV64-NEXT:    j .LBB8_164
+; RV64-NEXT:  .LBB8_416: # %cond.store457
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 25
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 37
+; RV64-NEXT:    bltz a1, .LBB8_417
+; RV64-NEXT:    j .LBB8_165
+; RV64-NEXT:  .LBB8_417: # %cond.store460
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 26
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 36
+; RV64-NEXT:    bltz a1, .LBB8_418
+; RV64-NEXT:    j .LBB8_166
+; RV64-NEXT:  .LBB8_418: # %cond.store463
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 27
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 35
+; RV64-NEXT:    bltz a1, .LBB8_419
+; RV64-NEXT:    j .LBB8_167
+; RV64-NEXT:  .LBB8_419: # %cond.store466
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 28
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 34
+; RV64-NEXT:    bltz a1, .LBB8_420
+; RV64-NEXT:    j .LBB8_168
+; RV64-NEXT:  .LBB8_420: # %cond.store469
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 29
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 33
+; RV64-NEXT:    bltz a1, .LBB8_421
+; RV64-NEXT:    j .LBB8_169
+; RV64-NEXT:  .LBB8_421: # %cond.store472
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 30
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 32
+; RV64-NEXT:    bltz a1, .LBB8_422
+; RV64-NEXT:    j .LBB8_170
+; RV64-NEXT:  .LBB8_422: # %cond.store475
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v10, v24, 31
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    addi a1, a0, 1
+; RV64-NEXT:    vse8.v v10, (a0)
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    slli a1, a2, 31
+; RV64-NEXT:    bltz a1, .LBB8_423
+; RV64-NEXT:    j .LBB8_171
+; RV64-NEXT:  .LBB8_423: # %cond.store478
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 1651(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 30
+; RV64-NEXT:    bltz a1, .LBB8_424
+; RV64-NEXT:    j .LBB8_172
+; RV64-NEXT:  .LBB8_424: # %cond.store481
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -128
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 1524(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 29
+; RV64-NEXT:    bltz a1, .LBB8_425
+; RV64-NEXT:    j .LBB8_173
+; RV64-NEXT:  .LBB8_425: # %cond.store484
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -256
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 1397(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 28
+; RV64-NEXT:    bltz a1, .LBB8_426
+; RV64-NEXT:    j .LBB8_174
+; RV64-NEXT:  .LBB8_426: # %cond.store487
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -384
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 1270(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 27
+; RV64-NEXT:    bltz a1, .LBB8_427
+; RV64-NEXT:    j .LBB8_175
+; RV64-NEXT:  .LBB8_427: # %cond.store490
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    li a4, 23
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 1143(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 26
+; RV64-NEXT:    bltz a1, .LBB8_428
+; RV64-NEXT:    j .LBB8_176
+; RV64-NEXT:  .LBB8_428: # %cond.store493
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -640
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 1016(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 25
+; RV64-NEXT:    bltz a1, .LBB8_429
+; RV64-NEXT:    j .LBB8_177
+; RV64-NEXT:  .LBB8_429: # %cond.store496
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -768
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 889(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 24
+; RV64-NEXT:    bltz a1, .LBB8_430
+; RV64-NEXT:    j .LBB8_178
+; RV64-NEXT:  .LBB8_430: # %cond.store499
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -896
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 762(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 23
+; RV64-NEXT:    bltz a1, .LBB8_431
+; RV64-NEXT:    j .LBB8_179
+; RV64-NEXT:  .LBB8_431: # %cond.store502
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    li a4, 11
+; RV64-NEXT:    slli a4, a4, 10
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 635(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 22
+; RV64-NEXT:    bltz a1, .LBB8_432
+; RV64-NEXT:    j .LBB8_180
+; RV64-NEXT:  .LBB8_432: # %cond.store505
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -1152
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 508(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 21
+; RV64-NEXT:    bltz a1, .LBB8_433
+; RV64-NEXT:    j .LBB8_181
+; RV64-NEXT:  .LBB8_433: # %cond.store508
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -1280
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 381(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 20
+; RV64-NEXT:    bltz a1, .LBB8_434
+; RV64-NEXT:    j .LBB8_182
+; RV64-NEXT:  .LBB8_434: # %cond.store511
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -1408
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 254(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 19
+; RV64-NEXT:    bltz a1, .LBB8_435
+; RV64-NEXT:    j .LBB8_183
+; RV64-NEXT:  .LBB8_435: # %cond.store514
+; RV64-NEXT:    li a1, 128
+; RV64-NEXT:    li a4, 21
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a1, 127(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a1, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a1, a2, 18
+; RV64-NEXT:    bgez a1, .LBB8_521
+; RV64-NEXT:    j .LBB8_184
+; RV64-NEXT:  .LBB8_521: # %cond.store514
+; RV64-NEXT:    j .LBB8_185
+; RV64-NEXT:  .LBB8_436: # %cond.store520
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -1792
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 2032(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 16
+; RV64-NEXT:    bltz a3, .LBB8_437
+; RV64-NEXT:    j .LBB8_187
+; RV64-NEXT:  .LBB8_437: # %cond.store523
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 3
+; RV64-NEXT:    addiw a4, a4, -1920
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1905(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 15
+; RV64-NEXT:    bltz a3, .LBB8_438
+; RV64-NEXT:    j .LBB8_188
+; RV64-NEXT:  .LBB8_438: # %cond.store526
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 5
+; RV64-NEXT:    slli a4, a4, 11
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1778(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 14
+; RV64-NEXT:    bltz a3, .LBB8_439
+; RV64-NEXT:    j .LBB8_189
+; RV64-NEXT:  .LBB8_439: # %cond.store529
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 1920
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1651(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 13
+; RV64-NEXT:    bltz a3, .LBB8_440
+; RV64-NEXT:    j .LBB8_190
+; RV64-NEXT:  .LBB8_440: # %cond.store532
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 1792
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1524(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 12
+; RV64-NEXT:    bltz a3, .LBB8_441
+; RV64-NEXT:    j .LBB8_191
+; RV64-NEXT:  .LBB8_441: # %cond.store535
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 1664
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1397(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 11
+; RV64-NEXT:    bltz a3, .LBB8_442
+; RV64-NEXT:    j .LBB8_192
+; RV64-NEXT:  .LBB8_442: # %cond.store538
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 19
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1270(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 10
+; RV64-NEXT:    bltz a3, .LBB8_443
+; RV64-NEXT:    j .LBB8_193
+; RV64-NEXT:  .LBB8_443: # %cond.store541
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 1408
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1143(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 9
+; RV64-NEXT:    bltz a3, .LBB8_444
+; RV64-NEXT:    j .LBB8_194
+; RV64-NEXT:  .LBB8_444: # %cond.store544
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 1280
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1016(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 8
+; RV64-NEXT:    bltz a3, .LBB8_445
+; RV64-NEXT:    j .LBB8_195
+; RV64-NEXT:  .LBB8_445: # %cond.store547
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 1152
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 889(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 7
+; RV64-NEXT:    bltz a3, .LBB8_446
+; RV64-NEXT:    j .LBB8_196
+; RV64-NEXT:  .LBB8_446: # %cond.store550
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 9
+; RV64-NEXT:    slli a4, a4, 10
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 762(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 6
+; RV64-NEXT:    bltz a3, .LBB8_447
+; RV64-NEXT:    j .LBB8_197
+; RV64-NEXT:  .LBB8_447: # %cond.store553
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 896
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 635(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 5
+; RV64-NEXT:    bltz a3, .LBB8_448
+; RV64-NEXT:    j .LBB8_198
+; RV64-NEXT:  .LBB8_448: # %cond.store556
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 768
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 508(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 4
+; RV64-NEXT:    bltz a3, .LBB8_449
+; RV64-NEXT:    j .LBB8_199
+; RV64-NEXT:  .LBB8_449: # %cond.store559
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 640
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 381(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 3
+; RV64-NEXT:    bltz a3, .LBB8_450
+; RV64-NEXT:    j .LBB8_200
+; RV64-NEXT:  .LBB8_450: # %cond.store562
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 17
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 254(a1)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a2, 2
+; RV64-NEXT:    bgez a3, .LBB8_522
+; RV64-NEXT:    j .LBB8_201
+; RV64-NEXT:  .LBB8_522: # %cond.store562
+; RV64-NEXT:    j .LBB8_202
+; RV64-NEXT:  .LBB8_451: # %cond.store571
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, 128
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 2032(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 1
+; RV64-NEXT:    bnez a2, .LBB8_452
+; RV64-NEXT:    j .LBB8_206
+; RV64-NEXT:  .LBB8_452: # %cond.store574
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 1905(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 2
+; RV64-NEXT:    bnez a2, .LBB8_453
+; RV64-NEXT:    j .LBB8_207
+; RV64-NEXT:  .LBB8_453: # %cond.store577
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, -128
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 1778(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 4
+; RV64-NEXT:    bnez a2, .LBB8_454
+; RV64-NEXT:    j .LBB8_208
+; RV64-NEXT:  .LBB8_454: # %cond.store580
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    li a4, 31
+; RV64-NEXT:    slli a4, a4, 8
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 1651(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 8
+; RV64-NEXT:    bnez a2, .LBB8_455
+; RV64-NEXT:    j .LBB8_209
+; RV64-NEXT:  .LBB8_455: # %cond.store583
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, -384
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 1524(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 16
+; RV64-NEXT:    bnez a2, .LBB8_456
+; RV64-NEXT:    j .LBB8_210
+; RV64-NEXT:  .LBB8_456: # %cond.store586
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    li a4, 15
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 1397(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 32
+; RV64-NEXT:    bnez a2, .LBB8_457
+; RV64-NEXT:    j .LBB8_211
+; RV64-NEXT:  .LBB8_457: # %cond.store589
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, -640
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 1270(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 64
+; RV64-NEXT:    bnez a2, .LBB8_458
+; RV64-NEXT:    j .LBB8_212
+; RV64-NEXT:  .LBB8_458: # %cond.store592
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    li a4, 29
+; RV64-NEXT:    slli a4, a4, 8
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 1143(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 128
+; RV64-NEXT:    bnez a2, .LBB8_459
+; RV64-NEXT:    j .LBB8_213
+; RV64-NEXT:  .LBB8_459: # %cond.store595
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, -896
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 1016(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 256
+; RV64-NEXT:    bnez a2, .LBB8_460
+; RV64-NEXT:    j .LBB8_214
+; RV64-NEXT:  .LBB8_460: # %cond.store598
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    li a4, 7
+; RV64-NEXT:    slli a4, a4, 10
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 889(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 512
+; RV64-NEXT:    bnez a2, .LBB8_461
+; RV64-NEXT:    j .LBB8_215
+; RV64-NEXT:  .LBB8_461: # %cond.store601
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, -1152
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 762(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    andi a2, a1, 1024
+; RV64-NEXT:    bnez a2, .LBB8_462
+; RV64-NEXT:    j .LBB8_216
+; RV64-NEXT:  .LBB8_462: # %cond.store604
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    li a4, 27
+; RV64-NEXT:    slli a4, a4, 8
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 635(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a2, a1, 52
+; RV64-NEXT:    bltz a2, .LBB8_463
+; RV64-NEXT:    j .LBB8_217
+; RV64-NEXT:  .LBB8_463: # %cond.store607
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, -1408
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 508(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a2, a1, 51
+; RV64-NEXT:    bltz a2, .LBB8_464
+; RV64-NEXT:    j .LBB8_218
+; RV64-NEXT:  .LBB8_464: # %cond.store610
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    li a4, 13
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 381(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a2, a1, 50
+; RV64-NEXT:    bltz a2, .LBB8_465
+; RV64-NEXT:    j .LBB8_219
+; RV64-NEXT:  .LBB8_465: # %cond.store613
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    lui a4, 2
+; RV64-NEXT:    addiw a4, a4, -1664
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 254(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a2, a1, 49
+; RV64-NEXT:    bltz a2, .LBB8_466
+; RV64-NEXT:    j .LBB8_220
+; RV64-NEXT:  .LBB8_466: # %cond.store616
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    li a4, 25
+; RV64-NEXT:    slli a4, a4, 8
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 127(a3)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a2, a1, 48
+; RV64-NEXT:    bgez a2, .LBB8_523
+; RV64-NEXT:    j .LBB8_221
+; RV64-NEXT:  .LBB8_523: # %cond.store616
+; RV64-NEXT:    j .LBB8_222
+; RV64-NEXT:  .LBB8_467: # %cond.store622
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 3
+; RV64-NEXT:    slli a4, a4, 11
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 2032(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 46
+; RV64-NEXT:    bltz a3, .LBB8_468
+; RV64-NEXT:    j .LBB8_224
+; RV64-NEXT:  .LBB8_468: # %cond.store625
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 1
+; RV64-NEXT:    addiw a4, a4, 1920
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1905(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 45
+; RV64-NEXT:    bltz a3, .LBB8_469
+; RV64-NEXT:    j .LBB8_225
+; RV64-NEXT:  .LBB8_469: # %cond.store628
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 23
+; RV64-NEXT:    slli a4, a4, 8
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1778(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 44
+; RV64-NEXT:    bltz a3, .LBB8_470
+; RV64-NEXT:    j .LBB8_226
+; RV64-NEXT:  .LBB8_470: # %cond.store631
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 1
+; RV64-NEXT:    addiw a4, a4, 1664
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1651(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 43
+; RV64-NEXT:    bltz a3, .LBB8_471
+; RV64-NEXT:    j .LBB8_227
+; RV64-NEXT:  .LBB8_471: # %cond.store634
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 11
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1524(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 42
+; RV64-NEXT:    bltz a3, .LBB8_472
+; RV64-NEXT:    j .LBB8_228
+; RV64-NEXT:  .LBB8_472: # %cond.store637
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 1
+; RV64-NEXT:    addiw a4, a4, 1408
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1397(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 41
+; RV64-NEXT:    bltz a3, .LBB8_473
+; RV64-NEXT:    j .LBB8_229
+; RV64-NEXT:  .LBB8_473: # %cond.store640
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 21
+; RV64-NEXT:    slli a4, a4, 8
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1270(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 40
+; RV64-NEXT:    bltz a3, .LBB8_474
+; RV64-NEXT:    j .LBB8_230
+; RV64-NEXT:  .LBB8_474: # %cond.store643
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 1
+; RV64-NEXT:    addiw a4, a4, 1152
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1143(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 39
+; RV64-NEXT:    bltz a3, .LBB8_475
+; RV64-NEXT:    j .LBB8_231
+; RV64-NEXT:  .LBB8_475: # %cond.store646
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 5
+; RV64-NEXT:    slli a4, a4, 10
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1016(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 38
+; RV64-NEXT:    bltz a3, .LBB8_476
+; RV64-NEXT:    j .LBB8_232
+; RV64-NEXT:  .LBB8_476: # %cond.store649
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 1
+; RV64-NEXT:    addiw a4, a4, 896
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 889(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 37
+; RV64-NEXT:    bltz a3, .LBB8_477
+; RV64-NEXT:    j .LBB8_233
+; RV64-NEXT:  .LBB8_477: # %cond.store652
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 19
+; RV64-NEXT:    slli a4, a4, 8
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 762(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 36
+; RV64-NEXT:    bltz a3, .LBB8_478
+; RV64-NEXT:    j .LBB8_234
+; RV64-NEXT:  .LBB8_478: # %cond.store655
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 1
+; RV64-NEXT:    addiw a4, a4, 640
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 635(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 35
+; RV64-NEXT:    bltz a3, .LBB8_479
+; RV64-NEXT:    j .LBB8_235
+; RV64-NEXT:  .LBB8_479: # %cond.store658
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 9
+; RV64-NEXT:    slli a4, a4, 9
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 508(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 34
+; RV64-NEXT:    bltz a3, .LBB8_480
+; RV64-NEXT:    j .LBB8_236
+; RV64-NEXT:  .LBB8_480: # %cond.store661
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 1
+; RV64-NEXT:    addiw a4, a4, 384
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 381(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 33
+; RV64-NEXT:    bltz a3, .LBB8_481
+; RV64-NEXT:    j .LBB8_237
+; RV64-NEXT:  .LBB8_481: # %cond.store664
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    li a4, 17
+; RV64-NEXT:    slli a4, a4, 8
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 254(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 32
+; RV64-NEXT:    bltz a3, .LBB8_482
+; RV64-NEXT:    j .LBB8_238
+; RV64-NEXT:  .LBB8_482: # %cond.store667
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    lui a4, 1
+; RV64-NEXT:    addiw a4, a4, 128
+; RV64-NEXT:    add a4, sp, a4
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 127(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 31
+; RV64-NEXT:    bgez a3, .LBB8_524
+; RV64-NEXT:    j .LBB8_239
+; RV64-NEXT:  .LBB8_524: # %cond.store667
+; RV64-NEXT:    j .LBB8_240
+; RV64-NEXT:  .LBB8_483: # %cond.store673
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 1921
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 2032(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 29
+; RV64-NEXT:    bltz a3, .LBB8_484
+; RV64-NEXT:    j .LBB8_242
+; RV64-NEXT:  .LBB8_484: # %cond.store676
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 1793
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1905(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 28
+; RV64-NEXT:    bltz a3, .LBB8_485
+; RV64-NEXT:    j .LBB8_243
+; RV64-NEXT:  .LBB8_485: # %cond.store679
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 1665
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1778(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 27
+; RV64-NEXT:    bltz a3, .LBB8_486
+; RV64-NEXT:    j .LBB8_244
+; RV64-NEXT:  .LBB8_486: # %cond.store682
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 1537
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1651(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 26
+; RV64-NEXT:    bltz a3, .LBB8_487
+; RV64-NEXT:    j .LBB8_245
+; RV64-NEXT:  .LBB8_487: # %cond.store685
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 1409
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1524(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 25
+; RV64-NEXT:    bltz a3, .LBB8_488
+; RV64-NEXT:    j .LBB8_246
+; RV64-NEXT:  .LBB8_488: # %cond.store688
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 1281
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1397(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 24
+; RV64-NEXT:    bltz a3, .LBB8_489
+; RV64-NEXT:    j .LBB8_247
+; RV64-NEXT:  .LBB8_489: # %cond.store691
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 1153
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1270(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 23
+; RV64-NEXT:    bltz a3, .LBB8_490
+; RV64-NEXT:    j .LBB8_248
+; RV64-NEXT:  .LBB8_490: # %cond.store694
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 1025
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1143(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 22
+; RV64-NEXT:    bltz a3, .LBB8_491
+; RV64-NEXT:    j .LBB8_249
+; RV64-NEXT:  .LBB8_491: # %cond.store697
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 897
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 1016(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 21
+; RV64-NEXT:    bltz a3, .LBB8_492
+; RV64-NEXT:    j .LBB8_250
+; RV64-NEXT:  .LBB8_492: # %cond.store700
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 769
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 889(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 20
+; RV64-NEXT:    bltz a3, .LBB8_493
+; RV64-NEXT:    j .LBB8_251
+; RV64-NEXT:  .LBB8_493: # %cond.store703
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 641
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 762(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 19
+; RV64-NEXT:    bltz a3, .LBB8_494
+; RV64-NEXT:    j .LBB8_252
+; RV64-NEXT:  .LBB8_494: # %cond.store706
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 513
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 635(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 18
+; RV64-NEXT:    bltz a3, .LBB8_495
+; RV64-NEXT:    j .LBB8_253
+; RV64-NEXT:  .LBB8_495: # %cond.store709
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 385
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 508(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 17
+; RV64-NEXT:    bltz a3, .LBB8_496
+; RV64-NEXT:    j .LBB8_254
+; RV64-NEXT:  .LBB8_496: # %cond.store712
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 257
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 381(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 16
+; RV64-NEXT:    bltz a3, .LBB8_497
+; RV64-NEXT:    j .LBB8_255
+; RV64-NEXT:  .LBB8_497: # %cond.store715
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 129
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 254(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 15
+; RV64-NEXT:    bltz a3, .LBB8_498
+; RV64-NEXT:    j .LBB8_256
+; RV64-NEXT:  .LBB8_498: # %cond.store718
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 2047
+; RV64-NEXT:    addi a4, a4, 1
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a3, 127(a2)
+; RV64-NEXT:    addi a4, a0, 1
+; RV64-NEXT:    sb a3, 0(a0)
+; RV64-NEXT:    mv a0, a4
+; RV64-NEXT:    slli a3, a1, 14
+; RV64-NEXT:    bltz a3, .LBB8_499
+; RV64-NEXT:    j .LBB8_257
+; RV64-NEXT:  .LBB8_499: # %cond.store721
+; RV64-NEXT:    li a3, 128
+; RV64-NEXT:    addi a4, sp, 1920
+; RV64-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a4)
+; RV64-NEXT:    lbu a2, 0(a2)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 13
+; RV64-NEXT:    bltz a2, .LBB8_500
+; RV64-NEXT:    j .LBB8_258
+; RV64-NEXT:  .LBB8_500: # %cond.store724
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 1792
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 1906(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 12
+; RV64-NEXT:    bltz a2, .LBB8_501
+; RV64-NEXT:    j .LBB8_259
+; RV64-NEXT:  .LBB8_501: # %cond.store727
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 1664
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 1779(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 11
+; RV64-NEXT:    bltz a2, .LBB8_502
+; RV64-NEXT:    j .LBB8_260
+; RV64-NEXT:  .LBB8_502: # %cond.store730
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 1536
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 1652(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 10
+; RV64-NEXT:    bltz a2, .LBB8_503
+; RV64-NEXT:    j .LBB8_261
+; RV64-NEXT:  .LBB8_503: # %cond.store733
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 1408
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 1525(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 9
+; RV64-NEXT:    bltz a2, .LBB8_504
+; RV64-NEXT:    j .LBB8_262
+; RV64-NEXT:  .LBB8_504: # %cond.store736
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 1280
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 1398(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 8
+; RV64-NEXT:    bltz a2, .LBB8_505
+; RV64-NEXT:    j .LBB8_263
+; RV64-NEXT:  .LBB8_505: # %cond.store739
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 1152
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 1271(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 7
+; RV64-NEXT:    bltz a2, .LBB8_506
+; RV64-NEXT:    j .LBB8_264
+; RV64-NEXT:  .LBB8_506: # %cond.store742
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 1024
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 1144(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 6
+; RV64-NEXT:    bltz a2, .LBB8_507
+; RV64-NEXT:    j .LBB8_265
+; RV64-NEXT:  .LBB8_507: # %cond.store745
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 896
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 1017(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 5
+; RV64-NEXT:    bltz a2, .LBB8_508
+; RV64-NEXT:    j .LBB8_266
+; RV64-NEXT:  .LBB8_508: # %cond.store748
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 768
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 890(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 4
+; RV64-NEXT:    bltz a2, .LBB8_509
+; RV64-NEXT:    j .LBB8_267
+; RV64-NEXT:  .LBB8_509: # %cond.store751
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 640
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 763(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 3
+; RV64-NEXT:    bltz a2, .LBB8_510
+; RV64-NEXT:    j .LBB8_268
+; RV64-NEXT:  .LBB8_510: # %cond.store754
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 512
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 636(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 2
+; RV64-NEXT:    bltz a2, .LBB8_511
+; RV64-NEXT:    j .LBB8_269
+; RV64-NEXT:  .LBB8_511: # %cond.store757
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 384
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 509(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    slli a2, a1, 1
+; RV64-NEXT:    bltz a2, .LBB8_512
+; RV64-NEXT:    j .LBB8_270
+; RV64-NEXT:  .LBB8_512: # %cond.store760
+; RV64-NEXT:    li a2, 128
+; RV64-NEXT:    addi a3, sp, 256
+; RV64-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV64-NEXT:    vse8.v v24, (a3)
+; RV64-NEXT:    lbu a2, 382(sp)
+; RV64-NEXT:    addi a3, a0, 1
+; RV64-NEXT:    sb a2, 0(a0)
+; RV64-NEXT:    mv a0, a3
+; RV64-NEXT:    bgez a1, .LBB8_525
+; RV64-NEXT:    j .LBB8_271
+; RV64-NEXT:  .LBB8_525: # %cond.store760
+; RV64-NEXT:    j .LBB8_272
+;
+; RV32-LABEL: test_compresstore_i8_v256:
+; RV32:       # %bb.0: # %entry
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vle8.v v24, (a1)
+; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a2, v0
+; RV32-NEXT:    andi a1, a2, 1
+; RV32-NEXT:    beqz a1, .LBB8_1
+; RV32-NEXT:    j .LBB8_284
+; RV32-NEXT:  .LBB8_1: # %else
+; RV32-NEXT:    andi a1, a2, 2
+; RV32-NEXT:    beqz a1, .LBB8_2
+; RV32-NEXT:    j .LBB8_285
+; RV32-NEXT:  .LBB8_2: # %else2
+; RV32-NEXT:    andi a1, a2, 4
+; RV32-NEXT:    beqz a1, .LBB8_3
+; RV32-NEXT:    j .LBB8_286
+; RV32-NEXT:  .LBB8_3: # %else5
+; RV32-NEXT:    andi a1, a2, 8
+; RV32-NEXT:    beqz a1, .LBB8_4
+; RV32-NEXT:    j .LBB8_287
+; RV32-NEXT:  .LBB8_4: # %else8
+; RV32-NEXT:    andi a1, a2, 16
+; RV32-NEXT:    beqz a1, .LBB8_5
+; RV32-NEXT:    j .LBB8_288
+; RV32-NEXT:  .LBB8_5: # %else11
+; RV32-NEXT:    andi a1, a2, 32
+; RV32-NEXT:    beqz a1, .LBB8_6
+; RV32-NEXT:    j .LBB8_289
+; RV32-NEXT:  .LBB8_6: # %else14
+; RV32-NEXT:    andi a1, a2, 64
+; RV32-NEXT:    beqz a1, .LBB8_7
+; RV32-NEXT:    j .LBB8_290
+; RV32-NEXT:  .LBB8_7: # %else17
+; RV32-NEXT:    andi a1, a2, 128
+; RV32-NEXT:    beqz a1, .LBB8_8
+; RV32-NEXT:    j .LBB8_291
+; RV32-NEXT:  .LBB8_8: # %else20
+; RV32-NEXT:    andi a1, a2, 256
+; RV32-NEXT:    beqz a1, .LBB8_9
+; RV32-NEXT:    j .LBB8_292
+; RV32-NEXT:  .LBB8_9: # %else23
+; RV32-NEXT:    andi a1, a2, 512
+; RV32-NEXT:    beqz a1, .LBB8_10
+; RV32-NEXT:    j .LBB8_293
+; RV32-NEXT:  .LBB8_10: # %else26
+; RV32-NEXT:    andi a1, a2, 1024
+; RV32-NEXT:    beqz a1, .LBB8_11
+; RV32-NEXT:    j .LBB8_294
+; RV32-NEXT:  .LBB8_11: # %else29
+; RV32-NEXT:    slli a1, a2, 20
+; RV32-NEXT:    bgez a1, .LBB8_12
+; RV32-NEXT:    j .LBB8_295
+; RV32-NEXT:  .LBB8_12: # %else32
+; RV32-NEXT:    slli a1, a2, 19
+; RV32-NEXT:    bgez a1, .LBB8_13
+; RV32-NEXT:    j .LBB8_296
+; RV32-NEXT:  .LBB8_13: # %else35
+; RV32-NEXT:    slli a1, a2, 18
+; RV32-NEXT:    bgez a1, .LBB8_14
+; RV32-NEXT:    j .LBB8_297
+; RV32-NEXT:  .LBB8_14: # %else38
+; RV32-NEXT:    slli a1, a2, 17
+; RV32-NEXT:    bgez a1, .LBB8_15
+; RV32-NEXT:    j .LBB8_298
+; RV32-NEXT:  .LBB8_15: # %else41
+; RV32-NEXT:    slli a1, a2, 16
+; RV32-NEXT:    bgez a1, .LBB8_16
+; RV32-NEXT:    j .LBB8_299
+; RV32-NEXT:  .LBB8_16: # %else44
+; RV32-NEXT:    slli a1, a2, 15
+; RV32-NEXT:    bgez a1, .LBB8_17
+; RV32-NEXT:    j .LBB8_300
+; RV32-NEXT:  .LBB8_17: # %else47
+; RV32-NEXT:    slli a1, a2, 14
+; RV32-NEXT:    bgez a1, .LBB8_18
+; RV32-NEXT:    j .LBB8_301
+; RV32-NEXT:  .LBB8_18: # %else50
+; RV32-NEXT:    slli a1, a2, 13
+; RV32-NEXT:    bgez a1, .LBB8_19
+; RV32-NEXT:    j .LBB8_302
+; RV32-NEXT:  .LBB8_19: # %else53
+; RV32-NEXT:    slli a1, a2, 12
+; RV32-NEXT:    bgez a1, .LBB8_20
+; RV32-NEXT:    j .LBB8_303
+; RV32-NEXT:  .LBB8_20: # %else56
+; RV32-NEXT:    slli a1, a2, 11
+; RV32-NEXT:    bgez a1, .LBB8_21
+; RV32-NEXT:    j .LBB8_304
+; RV32-NEXT:  .LBB8_21: # %else59
+; RV32-NEXT:    slli a1, a2, 10
+; RV32-NEXT:    bgez a1, .LBB8_22
+; RV32-NEXT:    j .LBB8_305
+; RV32-NEXT:  .LBB8_22: # %else62
+; RV32-NEXT:    slli a1, a2, 9
+; RV32-NEXT:    bgez a1, .LBB8_23
+; RV32-NEXT:    j .LBB8_306
+; RV32-NEXT:  .LBB8_23: # %else65
+; RV32-NEXT:    slli a1, a2, 8
+; RV32-NEXT:    bgez a1, .LBB8_24
+; RV32-NEXT:    j .LBB8_307
+; RV32-NEXT:  .LBB8_24: # %else68
+; RV32-NEXT:    slli a1, a2, 7
+; RV32-NEXT:    bgez a1, .LBB8_25
+; RV32-NEXT:    j .LBB8_308
+; RV32-NEXT:  .LBB8_25: # %else71
+; RV32-NEXT:    slli a1, a2, 6
+; RV32-NEXT:    bgez a1, .LBB8_26
+; RV32-NEXT:    j .LBB8_309
+; RV32-NEXT:  .LBB8_26: # %else74
+; RV32-NEXT:    slli a1, a2, 5
+; RV32-NEXT:    bgez a1, .LBB8_27
+; RV32-NEXT:    j .LBB8_310
+; RV32-NEXT:  .LBB8_27: # %else77
+; RV32-NEXT:    slli a1, a2, 4
+; RV32-NEXT:    bgez a1, .LBB8_28
+; RV32-NEXT:    j .LBB8_311
+; RV32-NEXT:  .LBB8_28: # %else80
+; RV32-NEXT:    slli a1, a2, 3
+; RV32-NEXT:    bgez a1, .LBB8_30
+; RV32-NEXT:  .LBB8_29: # %cond.store82
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 28
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:  .LBB8_30: # %else83
+; RV32-NEXT:    slli a3, a2, 2
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    bgez a3, .LBB8_32
+; RV32-NEXT:  # %bb.31: # %cond.store85
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 29
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:  .LBB8_32: # %else86
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    slli a3, a2, 1
+; RV32-NEXT:    vsrl.vx v9, v0, a1
+; RV32-NEXT:    bgez a3, .LBB8_34
+; RV32-NEXT:  # %bb.33: # %cond.store88
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 30
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:  .LBB8_34: # %else89
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a3, v9
+; RV32-NEXT:    bgez a2, .LBB8_36
+; RV32-NEXT:  # %bb.35: # %cond.store91
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 31
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:  .LBB8_36: # %else92
+; RV32-NEXT:    addi sp, sp, -2032
+; RV32-NEXT:    .cfi_def_cfa_offset 2032
+; RV32-NEXT:    sw ra, 2028(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 2024(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    addi s0, sp, 2032
+; RV32-NEXT:    .cfi_def_cfa s0, 0
+; RV32-NEXT:    lui a2, 6
+; RV32-NEXT:    addi a2, a2, -1776
+; RV32-NEXT:    sub sp, sp, a2
+; RV32-NEXT:    andi sp, sp, -128
+; RV32-NEXT:    andi a4, a3, 1
+; RV32-NEXT:    lui a2, 6
+; RV32-NEXT:    addi a2, a2, -984
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    beqz a4, .LBB8_37
+; RV32-NEXT:    j .LBB8_312
+; RV32-NEXT:  .LBB8_37: # %else95
+; RV32-NEXT:    andi a4, a3, 2
+; RV32-NEXT:    beqz a4, .LBB8_38
+; RV32-NEXT:    j .LBB8_313
+; RV32-NEXT:  .LBB8_38: # %else98
+; RV32-NEXT:    andi a4, a3, 4
+; RV32-NEXT:    beqz a4, .LBB8_39
+; RV32-NEXT:    j .LBB8_314
+; RV32-NEXT:  .LBB8_39: # %else101
+; RV32-NEXT:    andi a4, a3, 8
+; RV32-NEXT:    beqz a4, .LBB8_40
+; RV32-NEXT:    j .LBB8_315
+; RV32-NEXT:  .LBB8_40: # %else104
+; RV32-NEXT:    andi a4, a3, 16
+; RV32-NEXT:    beqz a4, .LBB8_41
+; RV32-NEXT:    j .LBB8_316
+; RV32-NEXT:  .LBB8_41: # %else107
+; RV32-NEXT:    andi a4, a3, 32
+; RV32-NEXT:    beqz a4, .LBB8_42
+; RV32-NEXT:    j .LBB8_317
+; RV32-NEXT:  .LBB8_42: # %else110
+; RV32-NEXT:    andi a4, a3, 64
+; RV32-NEXT:    beqz a4, .LBB8_43
+; RV32-NEXT:    j .LBB8_318
+; RV32-NEXT:  .LBB8_43: # %else113
+; RV32-NEXT:    andi a4, a3, 128
+; RV32-NEXT:    beqz a4, .LBB8_44
+; RV32-NEXT:    j .LBB8_319
+; RV32-NEXT:  .LBB8_44: # %else116
+; RV32-NEXT:    andi a4, a3, 256
+; RV32-NEXT:    beqz a4, .LBB8_46
+; RV32-NEXT:  .LBB8_45: # %cond.store118
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 23
+; RV32-NEXT:    slli a5, a5, 10
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 0(a2)
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:  .LBB8_46: # %else119
+; RV32-NEXT:    andi a4, a3, 512
+; RV32-NEXT:    lui a2, 5
+; RV32-NEXT:    addi a2, a2, 953
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    beqz a4, .LBB8_47
+; RV32-NEXT:    j .LBB8_320
+; RV32-NEXT:  .LBB8_47: # %else122
+; RV32-NEXT:    andi a4, a3, 1024
+; RV32-NEXT:    beqz a4, .LBB8_48
+; RV32-NEXT:    j .LBB8_321
+; RV32-NEXT:  .LBB8_48: # %else125
+; RV32-NEXT:    slli a4, a3, 20
+; RV32-NEXT:    bgez a4, .LBB8_49
+; RV32-NEXT:    j .LBB8_322
+; RV32-NEXT:  .LBB8_49: # %else128
+; RV32-NEXT:    slli a4, a3, 19
+; RV32-NEXT:    bgez a4, .LBB8_50
+; RV32-NEXT:    j .LBB8_323
+; RV32-NEXT:  .LBB8_50: # %else131
+; RV32-NEXT:    slli a4, a3, 18
+; RV32-NEXT:    bgez a4, .LBB8_51
+; RV32-NEXT:    j .LBB8_324
+; RV32-NEXT:  .LBB8_51: # %else134
+; RV32-NEXT:    slli a4, a3, 17
+; RV32-NEXT:    bgez a4, .LBB8_52
+; RV32-NEXT:    j .LBB8_325
+; RV32-NEXT:  .LBB8_52: # %else137
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    bgez a4, .LBB8_53
+; RV32-NEXT:    j .LBB8_326
+; RV32-NEXT:  .LBB8_53: # %else140
+; RV32-NEXT:    slli a4, a3, 15
+; RV32-NEXT:    bgez a4, .LBB8_54
+; RV32-NEXT:    j .LBB8_327
+; RV32-NEXT:  .LBB8_54: # %else143
+; RV32-NEXT:    slli a4, a3, 14
+; RV32-NEXT:    bgez a4, .LBB8_55
+; RV32-NEXT:    j .LBB8_328
+; RV32-NEXT:  .LBB8_55: # %else146
+; RV32-NEXT:    slli a4, a3, 13
+; RV32-NEXT:    bgez a4, .LBB8_56
+; RV32-NEXT:    j .LBB8_329
+; RV32-NEXT:  .LBB8_56: # %else149
+; RV32-NEXT:    slli a4, a3, 12
+; RV32-NEXT:    bgez a4, .LBB8_57
+; RV32-NEXT:    j .LBB8_330
+; RV32-NEXT:  .LBB8_57: # %else152
+; RV32-NEXT:    slli a4, a3, 11
+; RV32-NEXT:    bgez a4, .LBB8_58
+; RV32-NEXT:    j .LBB8_331
+; RV32-NEXT:  .LBB8_58: # %else155
+; RV32-NEXT:    slli a4, a3, 10
+; RV32-NEXT:    bgez a4, .LBB8_59
+; RV32-NEXT:    j .LBB8_332
+; RV32-NEXT:  .LBB8_59: # %else158
+; RV32-NEXT:    slli a4, a3, 9
+; RV32-NEXT:    bgez a4, .LBB8_60
+; RV32-NEXT:    j .LBB8_333
+; RV32-NEXT:  .LBB8_60: # %else161
+; RV32-NEXT:    slli a4, a3, 8
+; RV32-NEXT:    bgez a4, .LBB8_61
+; RV32-NEXT:    j .LBB8_334
+; RV32-NEXT:  .LBB8_61: # %else164
+; RV32-NEXT:    slli a4, a3, 7
+; RV32-NEXT:    bgez a4, .LBB8_62
+; RV32-NEXT:    j .LBB8_335
+; RV32-NEXT:  .LBB8_62: # %else167
+; RV32-NEXT:    slli a4, a3, 6
+; RV32-NEXT:    bgez a4, .LBB8_64
+; RV32-NEXT:  .LBB8_63: # %cond.store169
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 896
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 0(a2)
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a4
+; RV32-NEXT:  .LBB8_64: # %else170
+; RV32-NEXT:    slli a2, a3, 5
+; RV32-NEXT:    lui a4, 5
+; RV32-NEXT:    addi a4, a4, -1206
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    bgez a2, .LBB8_65
+; RV32-NEXT:    j .LBB8_336
+; RV32-NEXT:  .LBB8_65: # %else173
+; RV32-NEXT:    slli a2, a3, 4
+; RV32-NEXT:    bgez a2, .LBB8_66
+; RV32-NEXT:    j .LBB8_337
+; RV32-NEXT:  .LBB8_66: # %else176
+; RV32-NEXT:    slli a2, a3, 3
+; RV32-NEXT:    bgez a2, .LBB8_67
+; RV32-NEXT:    j .LBB8_338
+; RV32-NEXT:  .LBB8_67: # %else179
+; RV32-NEXT:    slli a2, a3, 2
+; RV32-NEXT:    bgez a2, .LBB8_69
+; RV32-NEXT:  .LBB8_68: # %cond.store181
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 384
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 1651(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:  .LBB8_69: # %else182
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    slli a2, a3, 1
+; RV32-NEXT:    vslidedown.vi v9, v0, 1
+; RV32-NEXT:    bgez a2, .LBB8_71
+; RV32-NEXT:  # %bb.70: # %cond.store184
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 256
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 1524(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:  .LBB8_71: # %else185
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a2, v9
+; RV32-NEXT:    bgez a3, .LBB8_72
+; RV32-NEXT:    j .LBB8_339
+; RV32-NEXT:  .LBB8_72: # %else188
+; RV32-NEXT:    andi a3, a2, 1
+; RV32-NEXT:    beqz a3, .LBB8_73
+; RV32-NEXT:    j .LBB8_340
+; RV32-NEXT:  .LBB8_73: # %else191
+; RV32-NEXT:    andi a3, a2, 2
+; RV32-NEXT:    beqz a3, .LBB8_74
+; RV32-NEXT:    j .LBB8_341
+; RV32-NEXT:  .LBB8_74: # %else194
+; RV32-NEXT:    andi a3, a2, 4
+; RV32-NEXT:    beqz a3, .LBB8_75
+; RV32-NEXT:    j .LBB8_342
+; RV32-NEXT:  .LBB8_75: # %else197
+; RV32-NEXT:    andi a3, a2, 8
+; RV32-NEXT:    beqz a3, .LBB8_76
+; RV32-NEXT:    j .LBB8_343
+; RV32-NEXT:  .LBB8_76: # %else200
+; RV32-NEXT:    andi a3, a2, 16
+; RV32-NEXT:    beqz a3, .LBB8_77
+; RV32-NEXT:    j .LBB8_344
+; RV32-NEXT:  .LBB8_77: # %else203
+; RV32-NEXT:    andi a3, a2, 32
+; RV32-NEXT:    beqz a3, .LBB8_78
+; RV32-NEXT:    j .LBB8_345
+; RV32-NEXT:  .LBB8_78: # %else206
+; RV32-NEXT:    andi a3, a2, 64
+; RV32-NEXT:    beqz a3, .LBB8_79
+; RV32-NEXT:    j .LBB8_346
+; RV32-NEXT:  .LBB8_79: # %else209
+; RV32-NEXT:    andi a3, a2, 128
+; RV32-NEXT:    beqz a3, .LBB8_80
+; RV32-NEXT:    j .LBB8_347
+; RV32-NEXT:  .LBB8_80: # %else212
+; RV32-NEXT:    andi a3, a2, 256
+; RV32-NEXT:    beqz a3, .LBB8_81
+; RV32-NEXT:    j .LBB8_348
+; RV32-NEXT:  .LBB8_81: # %else215
+; RV32-NEXT:    andi a3, a2, 512
+; RV32-NEXT:    beqz a3, .LBB8_82
+; RV32-NEXT:    j .LBB8_349
+; RV32-NEXT:  .LBB8_82: # %else218
+; RV32-NEXT:    andi a3, a2, 1024
+; RV32-NEXT:    beqz a3, .LBB8_84
+; RV32-NEXT:  .LBB8_83: # %cond.store220
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -1280
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 0(a4)
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a4
+; RV32-NEXT:  .LBB8_84: # %else221
+; RV32-NEXT:    slli a4, a2, 20
+; RV32-NEXT:    lui a3, 4
+; RV32-NEXT:    addi a3, a3, 731
+; RV32-NEXT:    add a3, sp, a3
+; RV32-NEXT:    bgez a4, .LBB8_85
+; RV32-NEXT:    j .LBB8_350
+; RV32-NEXT:  .LBB8_85: # %else224
+; RV32-NEXT:    slli a4, a2, 19
+; RV32-NEXT:    bgez a4, .LBB8_86
+; RV32-NEXT:    j .LBB8_351
+; RV32-NEXT:  .LBB8_86: # %else227
+; RV32-NEXT:    slli a4, a2, 18
+; RV32-NEXT:    bgez a4, .LBB8_87
+; RV32-NEXT:    j .LBB8_352
+; RV32-NEXT:  .LBB8_87: # %else230
+; RV32-NEXT:    slli a4, a2, 17
+; RV32-NEXT:    bgez a4, .LBB8_88
+; RV32-NEXT:    j .LBB8_353
+; RV32-NEXT:  .LBB8_88: # %else233
+; RV32-NEXT:    slli a4, a2, 16
+; RV32-NEXT:    bgez a4, .LBB8_89
+; RV32-NEXT:    j .LBB8_354
+; RV32-NEXT:  .LBB8_89: # %else236
+; RV32-NEXT:    slli a4, a2, 15
+; RV32-NEXT:    bgez a4, .LBB8_90
+; RV32-NEXT:    j .LBB8_355
+; RV32-NEXT:  .LBB8_90: # %else239
+; RV32-NEXT:    slli a4, a2, 14
+; RV32-NEXT:    bgez a4, .LBB8_91
+; RV32-NEXT:    j .LBB8_356
+; RV32-NEXT:  .LBB8_91: # %else242
+; RV32-NEXT:    slli a4, a2, 13
+; RV32-NEXT:    bgez a4, .LBB8_92
+; RV32-NEXT:    j .LBB8_357
+; RV32-NEXT:  .LBB8_92: # %else245
+; RV32-NEXT:    slli a4, a2, 12
+; RV32-NEXT:    bgez a4, .LBB8_93
+; RV32-NEXT:    j .LBB8_358
+; RV32-NEXT:  .LBB8_93: # %else248
+; RV32-NEXT:    slli a4, a2, 11
+; RV32-NEXT:    bgez a4, .LBB8_94
+; RV32-NEXT:    j .LBB8_359
+; RV32-NEXT:  .LBB8_94: # %else251
+; RV32-NEXT:    slli a4, a2, 10
+; RV32-NEXT:    bgez a4, .LBB8_95
+; RV32-NEXT:    j .LBB8_360
+; RV32-NEXT:  .LBB8_95: # %else254
+; RV32-NEXT:    slli a4, a2, 9
+; RV32-NEXT:    bgez a4, .LBB8_96
+; RV32-NEXT:    j .LBB8_361
+; RV32-NEXT:  .LBB8_96: # %else257
+; RV32-NEXT:    slli a4, a2, 8
+; RV32-NEXT:    bgez a4, .LBB8_97
+; RV32-NEXT:    j .LBB8_362
+; RV32-NEXT:  .LBB8_97: # %else260
+; RV32-NEXT:    slli a4, a2, 7
+; RV32-NEXT:    bgez a4, .LBB8_98
+; RV32-NEXT:    j .LBB8_363
+; RV32-NEXT:  .LBB8_98: # %else263
+; RV32-NEXT:    slli a4, a2, 6
+; RV32-NEXT:    bgez a4, .LBB8_99
+; RV32-NEXT:    j .LBB8_364
+; RV32-NEXT:  .LBB8_99: # %else266
+; RV32-NEXT:    slli a4, a2, 5
+; RV32-NEXT:    bgez a4, .LBB8_100
+; RV32-NEXT:    j .LBB8_365
+; RV32-NEXT:  .LBB8_100: # %else269
+; RV32-NEXT:    slli a4, a2, 4
+; RV32-NEXT:    bgez a4, .LBB8_102
+; RV32-NEXT:  .LBB8_101: # %cond.store271
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 640
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 0(a3)
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a4
+; RV32-NEXT:  .LBB8_102: # %else272
+; RV32-NEXT:    slli a3, a2, 3
+; RV32-NEXT:    lui a4, 4
+; RV32-NEXT:    addi a4, a4, -1428
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    bgez a3, .LBB8_104
+; RV32-NEXT:  # %bb.103: # %cond.store274
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 512
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 2032(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:  .LBB8_104: # %else275
+; RV32-NEXT:    slli a3, a2, 2
+; RV32-NEXT:    bgez a3, .LBB8_106
+; RV32-NEXT:  # %bb.105: # %cond.store277
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 384
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 1905(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:  .LBB8_106: # %else278
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    slli a3, a2, 1
+; RV32-NEXT:    vsrl.vx v9, v9, a1
+; RV32-NEXT:    bgez a3, .LBB8_108
+; RV32-NEXT:  # %bb.107: # %cond.store280
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 256
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 1778(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:  .LBB8_108: # %else281
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a3, v9
+; RV32-NEXT:    bgez a2, .LBB8_109
+; RV32-NEXT:    j .LBB8_366
+; RV32-NEXT:  .LBB8_109: # %else284
+; RV32-NEXT:    andi a2, a3, 1
+; RV32-NEXT:    beqz a2, .LBB8_110
+; RV32-NEXT:    j .LBB8_367
+; RV32-NEXT:  .LBB8_110: # %else287
+; RV32-NEXT:    andi a2, a3, 2
+; RV32-NEXT:    beqz a2, .LBB8_111
+; RV32-NEXT:    j .LBB8_368
+; RV32-NEXT:  .LBB8_111: # %else290
+; RV32-NEXT:    andi a2, a3, 4
+; RV32-NEXT:    beqz a2, .LBB8_112
+; RV32-NEXT:    j .LBB8_369
+; RV32-NEXT:  .LBB8_112: # %else293
+; RV32-NEXT:    andi a2, a3, 8
+; RV32-NEXT:    beqz a2, .LBB8_113
+; RV32-NEXT:    j .LBB8_370
+; RV32-NEXT:  .LBB8_113: # %else296
+; RV32-NEXT:    andi a2, a3, 16
+; RV32-NEXT:    beqz a2, .LBB8_114
+; RV32-NEXT:    j .LBB8_371
+; RV32-NEXT:  .LBB8_114: # %else299
+; RV32-NEXT:    andi a2, a3, 32
+; RV32-NEXT:    beqz a2, .LBB8_115
+; RV32-NEXT:    j .LBB8_372
+; RV32-NEXT:  .LBB8_115: # %else302
+; RV32-NEXT:    andi a2, a3, 64
+; RV32-NEXT:    beqz a2, .LBB8_116
+; RV32-NEXT:    j .LBB8_373
+; RV32-NEXT:  .LBB8_116: # %else305
+; RV32-NEXT:    andi a2, a3, 128
+; RV32-NEXT:    beqz a2, .LBB8_117
+; RV32-NEXT:    j .LBB8_374
+; RV32-NEXT:  .LBB8_117: # %else308
+; RV32-NEXT:    andi a2, a3, 256
+; RV32-NEXT:    beqz a2, .LBB8_118
+; RV32-NEXT:    j .LBB8_375
+; RV32-NEXT:  .LBB8_118: # %else311
+; RV32-NEXT:    andi a2, a3, 512
+; RV32-NEXT:    beqz a2, .LBB8_119
+; RV32-NEXT:    j .LBB8_376
+; RV32-NEXT:  .LBB8_119: # %else314
+; RV32-NEXT:    andi a2, a3, 1024
+; RV32-NEXT:    beqz a2, .LBB8_120
+; RV32-NEXT:    j .LBB8_377
+; RV32-NEXT:  .LBB8_120: # %else317
+; RV32-NEXT:    slli a2, a3, 20
+; RV32-NEXT:    bgez a2, .LBB8_121
+; RV32-NEXT:    j .LBB8_378
+; RV32-NEXT:  .LBB8_121: # %else320
+; RV32-NEXT:    slli a2, a3, 19
+; RV32-NEXT:    bgez a2, .LBB8_123
+; RV32-NEXT:  .LBB8_122: # %cond.store322
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    li a5, 29
+; RV32-NEXT:    slli a5, a5, 9
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 0(a4)
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a4
+; RV32-NEXT:  .LBB8_123: # %else323
+; RV32-NEXT:    slli a4, a3, 18
+; RV32-NEXT:    lui a2, 3
+; RV32-NEXT:    addi a2, a2, 509
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    bgez a4, .LBB8_124
+; RV32-NEXT:    j .LBB8_379
+; RV32-NEXT:  .LBB8_124: # %else326
+; RV32-NEXT:    slli a4, a3, 17
+; RV32-NEXT:    bgez a4, .LBB8_125
+; RV32-NEXT:    j .LBB8_380
+; RV32-NEXT:  .LBB8_125: # %else329
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    bgez a4, .LBB8_126
+; RV32-NEXT:    j .LBB8_381
+; RV32-NEXT:  .LBB8_126: # %else332
+; RV32-NEXT:    slli a4, a3, 15
+; RV32-NEXT:    bgez a4, .LBB8_127
+; RV32-NEXT:    j .LBB8_382
+; RV32-NEXT:  .LBB8_127: # %else335
+; RV32-NEXT:    slli a4, a3, 14
+; RV32-NEXT:    bgez a4, .LBB8_128
+; RV32-NEXT:    j .LBB8_383
+; RV32-NEXT:  .LBB8_128: # %else338
+; RV32-NEXT:    slli a4, a3, 13
+; RV32-NEXT:    bgez a4, .LBB8_129
+; RV32-NEXT:    j .LBB8_384
+; RV32-NEXT:  .LBB8_129: # %else341
+; RV32-NEXT:    slli a4, a3, 12
+; RV32-NEXT:    bgez a4, .LBB8_130
+; RV32-NEXT:    j .LBB8_385
+; RV32-NEXT:  .LBB8_130: # %else344
+; RV32-NEXT:    slli a4, a3, 11
+; RV32-NEXT:    bgez a4, .LBB8_131
+; RV32-NEXT:    j .LBB8_386
+; RV32-NEXT:  .LBB8_131: # %else347
+; RV32-NEXT:    slli a4, a3, 10
+; RV32-NEXT:    bgez a4, .LBB8_132
+; RV32-NEXT:    j .LBB8_387
+; RV32-NEXT:  .LBB8_132: # %else350
+; RV32-NEXT:    slli a4, a3, 9
+; RV32-NEXT:    bgez a4, .LBB8_133
+; RV32-NEXT:    j .LBB8_388
+; RV32-NEXT:  .LBB8_133: # %else353
+; RV32-NEXT:    slli a4, a3, 8
+; RV32-NEXT:    bgez a4, .LBB8_134
+; RV32-NEXT:    j .LBB8_389
+; RV32-NEXT:  .LBB8_134: # %else356
+; RV32-NEXT:    slli a4, a3, 7
+; RV32-NEXT:    bgez a4, .LBB8_135
+; RV32-NEXT:    j .LBB8_390
+; RV32-NEXT:  .LBB8_135: # %else359
+; RV32-NEXT:    slli a4, a3, 6
+; RV32-NEXT:    bgez a4, .LBB8_136
+; RV32-NEXT:    j .LBB8_391
+; RV32-NEXT:  .LBB8_136: # %else362
+; RV32-NEXT:    slli a4, a3, 5
+; RV32-NEXT:    bgez a4, .LBB8_137
+; RV32-NEXT:    j .LBB8_392
+; RV32-NEXT:  .LBB8_137: # %else365
+; RV32-NEXT:    slli a4, a3, 4
+; RV32-NEXT:    bgez a4, .LBB8_138
+; RV32-NEXT:    j .LBB8_393
+; RV32-NEXT:  .LBB8_138: # %else368
+; RV32-NEXT:    slli a4, a3, 3
+; RV32-NEXT:    bgez a4, .LBB8_139
+; RV32-NEXT:    j .LBB8_394
+; RV32-NEXT:  .LBB8_139: # %else371
+; RV32-NEXT:    slli a4, a3, 2
+; RV32-NEXT:    bgez a4, .LBB8_141
+; RV32-NEXT:  .LBB8_140: # %cond.store373
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 384
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 0(a2)
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a4
+; RV32-NEXT:  .LBB8_141: # %else374
+; RV32-NEXT:    slli a4, a3, 1
+; RV32-NEXT:    lui a2, 3
+; RV32-NEXT:    addi a2, a2, -1619
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    bgez a4, .LBB8_143
+; RV32-NEXT:  # %bb.142: # %cond.store376
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 256
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 2001(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:  .LBB8_143: # %else377
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a4, v8
+; RV32-NEXT:    bgez a3, .LBB8_144
+; RV32-NEXT:    j .LBB8_395
+; RV32-NEXT:  .LBB8_144: # %else380
+; RV32-NEXT:    andi a3, a4, 1
+; RV32-NEXT:    beqz a3, .LBB8_145
+; RV32-NEXT:    j .LBB8_396
+; RV32-NEXT:  .LBB8_145: # %else383
+; RV32-NEXT:    andi a3, a4, 2
+; RV32-NEXT:    beqz a3, .LBB8_146
+; RV32-NEXT:    j .LBB8_397
+; RV32-NEXT:  .LBB8_146: # %else386
+; RV32-NEXT:    andi a3, a4, 4
+; RV32-NEXT:    beqz a3, .LBB8_147
+; RV32-NEXT:    j .LBB8_398
+; RV32-NEXT:  .LBB8_147: # %else389
+; RV32-NEXT:    andi a3, a4, 8
+; RV32-NEXT:    beqz a3, .LBB8_148
+; RV32-NEXT:    j .LBB8_399
+; RV32-NEXT:  .LBB8_148: # %else392
+; RV32-NEXT:    andi a3, a4, 16
+; RV32-NEXT:    beqz a3, .LBB8_149
+; RV32-NEXT:    j .LBB8_400
+; RV32-NEXT:  .LBB8_149: # %else395
+; RV32-NEXT:    andi a3, a4, 32
+; RV32-NEXT:    beqz a3, .LBB8_150
+; RV32-NEXT:    j .LBB8_401
+; RV32-NEXT:  .LBB8_150: # %else398
+; RV32-NEXT:    andi a3, a4, 64
+; RV32-NEXT:    beqz a3, .LBB8_151
+; RV32-NEXT:    j .LBB8_402
+; RV32-NEXT:  .LBB8_151: # %else401
+; RV32-NEXT:    andi a3, a4, 128
+; RV32-NEXT:    beqz a3, .LBB8_152
+; RV32-NEXT:    j .LBB8_403
+; RV32-NEXT:  .LBB8_152: # %else404
+; RV32-NEXT:    andi a3, a4, 256
+; RV32-NEXT:    beqz a3, .LBB8_153
+; RV32-NEXT:    j .LBB8_404
+; RV32-NEXT:  .LBB8_153: # %else407
+; RV32-NEXT:    andi a3, a4, 512
+; RV32-NEXT:    beqz a3, .LBB8_154
+; RV32-NEXT:    j .LBB8_405
+; RV32-NEXT:  .LBB8_154: # %else410
+; RV32-NEXT:    andi a3, a4, 1024
+; RV32-NEXT:    beqz a3, .LBB8_155
+; RV32-NEXT:    j .LBB8_406
+; RV32-NEXT:  .LBB8_155: # %else413
+; RV32-NEXT:    slli a3, a4, 20
+; RV32-NEXT:    bgez a3, .LBB8_156
+; RV32-NEXT:    j .LBB8_407
+; RV32-NEXT:  .LBB8_156: # %else416
+; RV32-NEXT:    slli a3, a4, 19
+; RV32-NEXT:    bgez a3, .LBB8_157
+; RV32-NEXT:    j .LBB8_408
+; RV32-NEXT:  .LBB8_157: # %else419
+; RV32-NEXT:    slli a3, a4, 18
+; RV32-NEXT:    bgez a3, .LBB8_158
+; RV32-NEXT:    j .LBB8_409
+; RV32-NEXT:  .LBB8_158: # %else422
+; RV32-NEXT:    slli a3, a4, 17
+; RV32-NEXT:    bgez a3, .LBB8_159
+; RV32-NEXT:    j .LBB8_410
+; RV32-NEXT:  .LBB8_159: # %else425
+; RV32-NEXT:    slli a3, a4, 16
+; RV32-NEXT:    bgez a3, .LBB8_160
+; RV32-NEXT:    j .LBB8_411
+; RV32-NEXT:  .LBB8_160: # %else428
+; RV32-NEXT:    slli a3, a4, 15
+; RV32-NEXT:    bgez a3, .LBB8_161
+; RV32-NEXT:    j .LBB8_412
+; RV32-NEXT:  .LBB8_161: # %else431
+; RV32-NEXT:    slli a3, a4, 14
+; RV32-NEXT:    bgez a3, .LBB8_162
+; RV32-NEXT:    j .LBB8_413
+; RV32-NEXT:  .LBB8_162: # %else434
+; RV32-NEXT:    slli a3, a4, 13
+; RV32-NEXT:    bgez a3, .LBB8_163
+; RV32-NEXT:    j .LBB8_414
+; RV32-NEXT:  .LBB8_163: # %else437
+; RV32-NEXT:    slli a3, a4, 12
+; RV32-NEXT:    bgez a3, .LBB8_164
+; RV32-NEXT:    j .LBB8_415
+; RV32-NEXT:  .LBB8_164: # %else440
+; RV32-NEXT:    slli a3, a4, 11
+; RV32-NEXT:    bgez a3, .LBB8_165
+; RV32-NEXT:    j .LBB8_416
+; RV32-NEXT:  .LBB8_165: # %else443
+; RV32-NEXT:    slli a3, a4, 10
+; RV32-NEXT:    bgez a3, .LBB8_166
+; RV32-NEXT:    j .LBB8_417
+; RV32-NEXT:  .LBB8_166: # %else446
+; RV32-NEXT:    slli a3, a4, 9
+; RV32-NEXT:    bgez a3, .LBB8_167
+; RV32-NEXT:    j .LBB8_418
+; RV32-NEXT:  .LBB8_167: # %else449
+; RV32-NEXT:    slli a3, a4, 8
+; RV32-NEXT:    bgez a3, .LBB8_168
+; RV32-NEXT:    j .LBB8_419
+; RV32-NEXT:  .LBB8_168: # %else452
+; RV32-NEXT:    slli a3, a4, 7
+; RV32-NEXT:    bgez a3, .LBB8_169
+; RV32-NEXT:    j .LBB8_420
+; RV32-NEXT:  .LBB8_169: # %else455
+; RV32-NEXT:    slli a3, a4, 6
+; RV32-NEXT:    bgez a3, .LBB8_170
+; RV32-NEXT:    j .LBB8_421
+; RV32-NEXT:  .LBB8_170: # %else458
+; RV32-NEXT:    slli a3, a4, 5
+; RV32-NEXT:    bgez a3, .LBB8_171
+; RV32-NEXT:    j .LBB8_422
+; RV32-NEXT:  .LBB8_171: # %else461
+; RV32-NEXT:    slli a3, a4, 4
+; RV32-NEXT:    bgez a3, .LBB8_172
+; RV32-NEXT:    j .LBB8_423
+; RV32-NEXT:  .LBB8_172: # %else464
+; RV32-NEXT:    slli a3, a4, 3
+; RV32-NEXT:    bgez a3, .LBB8_173
+; RV32-NEXT:    j .LBB8_424
+; RV32-NEXT:  .LBB8_173: # %else467
+; RV32-NEXT:    slli a3, a4, 2
+; RV32-NEXT:    bgez a3, .LBB8_175
+; RV32-NEXT:  .LBB8_174: # %cond.store469
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 29
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:  .LBB8_175: # %else470
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    slli a3, a4, 1
+; RV32-NEXT:    vsrl.vx v9, v8, a1
+; RV32-NEXT:    bgez a3, .LBB8_177
+; RV32-NEXT:  # %bb.176: # %cond.store472
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 30
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:  .LBB8_177: # %else473
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a3, v9
+; RV32-NEXT:    bgez a4, .LBB8_178
+; RV32-NEXT:    j .LBB8_425
+; RV32-NEXT:  .LBB8_178: # %else476
+; RV32-NEXT:    andi a4, a3, 1
+; RV32-NEXT:    beqz a4, .LBB8_179
+; RV32-NEXT:    j .LBB8_426
+; RV32-NEXT:  .LBB8_179: # %else479
+; RV32-NEXT:    andi a4, a3, 2
+; RV32-NEXT:    beqz a4, .LBB8_180
+; RV32-NEXT:    j .LBB8_427
+; RV32-NEXT:  .LBB8_180: # %else482
+; RV32-NEXT:    andi a4, a3, 4
+; RV32-NEXT:    beqz a4, .LBB8_181
+; RV32-NEXT:    j .LBB8_428
+; RV32-NEXT:  .LBB8_181: # %else485
+; RV32-NEXT:    andi a4, a3, 8
+; RV32-NEXT:    beqz a4, .LBB8_182
+; RV32-NEXT:    j .LBB8_429
+; RV32-NEXT:  .LBB8_182: # %else488
+; RV32-NEXT:    andi a4, a3, 16
+; RV32-NEXT:    beqz a4, .LBB8_183
+; RV32-NEXT:    j .LBB8_430
+; RV32-NEXT:  .LBB8_183: # %else491
+; RV32-NEXT:    andi a4, a3, 32
+; RV32-NEXT:    beqz a4, .LBB8_184
+; RV32-NEXT:    j .LBB8_431
+; RV32-NEXT:  .LBB8_184: # %else494
+; RV32-NEXT:    andi a4, a3, 64
+; RV32-NEXT:    beqz a4, .LBB8_185
+; RV32-NEXT:    j .LBB8_432
+; RV32-NEXT:  .LBB8_185: # %else497
+; RV32-NEXT:    andi a4, a3, 128
+; RV32-NEXT:    beqz a4, .LBB8_186
+; RV32-NEXT:    j .LBB8_433
+; RV32-NEXT:  .LBB8_186: # %else500
+; RV32-NEXT:    andi a4, a3, 256
+; RV32-NEXT:    beqz a4, .LBB8_187
+; RV32-NEXT:    j .LBB8_434
+; RV32-NEXT:  .LBB8_187: # %else503
+; RV32-NEXT:    andi a4, a3, 512
+; RV32-NEXT:    beqz a4, .LBB8_188
+; RV32-NEXT:    j .LBB8_435
+; RV32-NEXT:  .LBB8_188: # %else506
+; RV32-NEXT:    andi a4, a3, 1024
+; RV32-NEXT:    beqz a4, .LBB8_189
+; RV32-NEXT:    j .LBB8_436
+; RV32-NEXT:  .LBB8_189: # %else509
+; RV32-NEXT:    slli a4, a3, 20
+; RV32-NEXT:    bgez a4, .LBB8_190
+; RV32-NEXT:    j .LBB8_437
+; RV32-NEXT:  .LBB8_190: # %else512
+; RV32-NEXT:    slli a4, a3, 19
+; RV32-NEXT:    bgez a4, .LBB8_191
+; RV32-NEXT:    j .LBB8_438
+; RV32-NEXT:  .LBB8_191: # %else515
+; RV32-NEXT:    slli a4, a3, 18
+; RV32-NEXT:    bgez a4, .LBB8_193
+; RV32-NEXT:  .LBB8_192: # %cond.store517
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -1664
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a2, 0(a2)
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a4
+; RV32-NEXT:  .LBB8_193: # %else518
+; RV32-NEXT:    slli a4, a3, 17
+; RV32-NEXT:    lui a2, 2
+; RV32-NEXT:    addi a2, a2, 318
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    bgez a4, .LBB8_194
+; RV32-NEXT:    j .LBB8_439
+; RV32-NEXT:  .LBB8_194: # %else521
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    bgez a4, .LBB8_195
+; RV32-NEXT:    j .LBB8_440
+; RV32-NEXT:  .LBB8_195: # %else524
+; RV32-NEXT:    slli a4, a3, 15
+; RV32-NEXT:    bgez a4, .LBB8_196
+; RV32-NEXT:    j .LBB8_441
+; RV32-NEXT:  .LBB8_196: # %else527
+; RV32-NEXT:    slli a4, a3, 14
+; RV32-NEXT:    bgez a4, .LBB8_197
+; RV32-NEXT:    j .LBB8_442
+; RV32-NEXT:  .LBB8_197: # %else530
+; RV32-NEXT:    slli a4, a3, 13
+; RV32-NEXT:    bgez a4, .LBB8_198
+; RV32-NEXT:    j .LBB8_443
+; RV32-NEXT:  .LBB8_198: # %else533
+; RV32-NEXT:    slli a4, a3, 12
+; RV32-NEXT:    bgez a4, .LBB8_199
+; RV32-NEXT:    j .LBB8_444
+; RV32-NEXT:  .LBB8_199: # %else536
+; RV32-NEXT:    slli a4, a3, 11
+; RV32-NEXT:    bgez a4, .LBB8_200
+; RV32-NEXT:    j .LBB8_445
+; RV32-NEXT:  .LBB8_200: # %else539
+; RV32-NEXT:    slli a4, a3, 10
+; RV32-NEXT:    bgez a4, .LBB8_201
+; RV32-NEXT:    j .LBB8_446
+; RV32-NEXT:  .LBB8_201: # %else542
+; RV32-NEXT:    slli a4, a3, 9
+; RV32-NEXT:    bgez a4, .LBB8_202
+; RV32-NEXT:    j .LBB8_447
+; RV32-NEXT:  .LBB8_202: # %else545
+; RV32-NEXT:    slli a4, a3, 8
+; RV32-NEXT:    bgez a4, .LBB8_203
+; RV32-NEXT:    j .LBB8_448
+; RV32-NEXT:  .LBB8_203: # %else548
+; RV32-NEXT:    slli a4, a3, 7
+; RV32-NEXT:    bgez a4, .LBB8_204
+; RV32-NEXT:    j .LBB8_449
+; RV32-NEXT:  .LBB8_204: # %else551
+; RV32-NEXT:    slli a4, a3, 6
+; RV32-NEXT:    bgez a4, .LBB8_205
+; RV32-NEXT:    j .LBB8_450
+; RV32-NEXT:  .LBB8_205: # %else554
+; RV32-NEXT:    slli a4, a3, 5
+; RV32-NEXT:    bgez a4, .LBB8_206
+; RV32-NEXT:    j .LBB8_451
+; RV32-NEXT:  .LBB8_206: # %else557
+; RV32-NEXT:    slli a4, a3, 4
+; RV32-NEXT:    bgez a4, .LBB8_207
+; RV32-NEXT:    j .LBB8_452
+; RV32-NEXT:  .LBB8_207: # %else560
+; RV32-NEXT:    slli a4, a3, 3
+; RV32-NEXT:    bgez a4, .LBB8_208
+; RV32-NEXT:    j .LBB8_453
+; RV32-NEXT:  .LBB8_208: # %else563
+; RV32-NEXT:    slli a4, a3, 2
+; RV32-NEXT:    bgez a4, .LBB8_210
+; RV32-NEXT:  .LBB8_209: # %cond.store565
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 2
+; RV32-NEXT:    addi a5, a5, 384
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 127(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:  .LBB8_210: # %else566
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    slli a4, a3, 1
+; RV32-NEXT:    vslidedown.vi v8, v8, 1
+; RV32-NEXT:    bgez a4, .LBB8_212
+; RV32-NEXT:  # %bb.211: # %cond.store568
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 2
+; RV32-NEXT:    addi a5, a5, 256
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a2, 0(a2)
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a4
+; RV32-NEXT:  .LBB8_212: # %else569
+; RV32-NEXT:    lui a2, 2
+; RV32-NEXT:    addi a2, a2, -1841
+; RV32-NEXT:    add a4, sp, a2
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a2, v8
+; RV32-NEXT:    bgez a3, .LBB8_213
+; RV32-NEXT:    j .LBB8_454
+; RV32-NEXT:  .LBB8_213: # %else572
+; RV32-NEXT:    andi a3, a2, 1
+; RV32-NEXT:    beqz a3, .LBB8_214
+; RV32-NEXT:    j .LBB8_455
+; RV32-NEXT:  .LBB8_214: # %else575
+; RV32-NEXT:    andi a3, a2, 2
+; RV32-NEXT:    beqz a3, .LBB8_215
+; RV32-NEXT:    j .LBB8_456
+; RV32-NEXT:  .LBB8_215: # %else578
+; RV32-NEXT:    andi a3, a2, 4
+; RV32-NEXT:    beqz a3, .LBB8_216
+; RV32-NEXT:    j .LBB8_457
+; RV32-NEXT:  .LBB8_216: # %else581
+; RV32-NEXT:    andi a3, a2, 8
+; RV32-NEXT:    beqz a3, .LBB8_217
+; RV32-NEXT:    j .LBB8_458
+; RV32-NEXT:  .LBB8_217: # %else584
+; RV32-NEXT:    andi a3, a2, 16
+; RV32-NEXT:    beqz a3, .LBB8_218
+; RV32-NEXT:    j .LBB8_459
+; RV32-NEXT:  .LBB8_218: # %else587
+; RV32-NEXT:    andi a3, a2, 32
+; RV32-NEXT:    beqz a3, .LBB8_219
+; RV32-NEXT:    j .LBB8_460
+; RV32-NEXT:  .LBB8_219: # %else590
+; RV32-NEXT:    andi a3, a2, 64
+; RV32-NEXT:    beqz a3, .LBB8_220
+; RV32-NEXT:    j .LBB8_461
+; RV32-NEXT:  .LBB8_220: # %else593
+; RV32-NEXT:    andi a3, a2, 128
+; RV32-NEXT:    beqz a3, .LBB8_221
+; RV32-NEXT:    j .LBB8_462
+; RV32-NEXT:  .LBB8_221: # %else596
+; RV32-NEXT:    andi a3, a2, 256
+; RV32-NEXT:    beqz a3, .LBB8_222
+; RV32-NEXT:    j .LBB8_463
+; RV32-NEXT:  .LBB8_222: # %else599
+; RV32-NEXT:    andi a3, a2, 512
+; RV32-NEXT:    beqz a3, .LBB8_223
+; RV32-NEXT:    j .LBB8_464
+; RV32-NEXT:  .LBB8_223: # %else602
+; RV32-NEXT:    andi a3, a2, 1024
+; RV32-NEXT:    beqz a3, .LBB8_224
+; RV32-NEXT:    j .LBB8_465
+; RV32-NEXT:  .LBB8_224: # %else605
+; RV32-NEXT:    slli a3, a2, 20
+; RV32-NEXT:    bgez a3, .LBB8_225
+; RV32-NEXT:    j .LBB8_466
+; RV32-NEXT:  .LBB8_225: # %else608
+; RV32-NEXT:    slli a3, a2, 19
+; RV32-NEXT:    bgez a3, .LBB8_226
+; RV32-NEXT:    j .LBB8_467
+; RV32-NEXT:  .LBB8_226: # %else611
+; RV32-NEXT:    slli a3, a2, 18
+; RV32-NEXT:    bgez a3, .LBB8_227
+; RV32-NEXT:    j .LBB8_468
+; RV32-NEXT:  .LBB8_227: # %else614
+; RV32-NEXT:    slli a3, a2, 17
+; RV32-NEXT:    bgez a3, .LBB8_228
+; RV32-NEXT:    j .LBB8_469
+; RV32-NEXT:  .LBB8_228: # %else617
+; RV32-NEXT:    slli a3, a2, 16
+; RV32-NEXT:    bgez a3, .LBB8_230
+; RV32-NEXT:  .LBB8_229: # %cond.store619
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 2
+; RV32-NEXT:    addi a5, a5, -1920
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a3, 0(a4)
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a4
+; RV32-NEXT:  .LBB8_230: # %else620
+; RV32-NEXT:    slli a4, a2, 15
+; RV32-NEXT:    lui a3, 1
+; RV32-NEXT:    addi a3, a3, 96
+; RV32-NEXT:    add a3, sp, a3
+; RV32-NEXT:    bgez a4, .LBB8_231
+; RV32-NEXT:    j .LBB8_470
+; RV32-NEXT:  .LBB8_231: # %else623
+; RV32-NEXT:    slli a4, a2, 14
+; RV32-NEXT:    bgez a4, .LBB8_232
+; RV32-NEXT:    j .LBB8_471
+; RV32-NEXT:  .LBB8_232: # %else626
+; RV32-NEXT:    slli a4, a2, 13
+; RV32-NEXT:    bgez a4, .LBB8_233
+; RV32-NEXT:    j .LBB8_472
+; RV32-NEXT:  .LBB8_233: # %else629
+; RV32-NEXT:    slli a4, a2, 12
+; RV32-NEXT:    bgez a4, .LBB8_234
+; RV32-NEXT:    j .LBB8_473
+; RV32-NEXT:  .LBB8_234: # %else632
+; RV32-NEXT:    slli a4, a2, 11
+; RV32-NEXT:    bgez a4, .LBB8_235
+; RV32-NEXT:    j .LBB8_474
+; RV32-NEXT:  .LBB8_235: # %else635
+; RV32-NEXT:    slli a4, a2, 10
+; RV32-NEXT:    bgez a4, .LBB8_236
+; RV32-NEXT:    j .LBB8_475
+; RV32-NEXT:  .LBB8_236: # %else638
+; RV32-NEXT:    slli a4, a2, 9
+; RV32-NEXT:    bgez a4, .LBB8_237
+; RV32-NEXT:    j .LBB8_476
+; RV32-NEXT:  .LBB8_237: # %else641
+; RV32-NEXT:    slli a4, a2, 8
+; RV32-NEXT:    bgez a4, .LBB8_238
+; RV32-NEXT:    j .LBB8_477
+; RV32-NEXT:  .LBB8_238: # %else644
+; RV32-NEXT:    slli a4, a2, 7
+; RV32-NEXT:    bgez a4, .LBB8_239
+; RV32-NEXT:    j .LBB8_478
+; RV32-NEXT:  .LBB8_239: # %else647
+; RV32-NEXT:    slli a4, a2, 6
+; RV32-NEXT:    bgez a4, .LBB8_240
+; RV32-NEXT:    j .LBB8_479
+; RV32-NEXT:  .LBB8_240: # %else650
+; RV32-NEXT:    slli a4, a2, 5
+; RV32-NEXT:    bgez a4, .LBB8_241
+; RV32-NEXT:    j .LBB8_480
+; RV32-NEXT:  .LBB8_241: # %else653
+; RV32-NEXT:    slli a4, a2, 4
+; RV32-NEXT:    bgez a4, .LBB8_242
+; RV32-NEXT:    j .LBB8_481
+; RV32-NEXT:  .LBB8_242: # %else656
+; RV32-NEXT:    slli a4, a2, 3
+; RV32-NEXT:    bgez a4, .LBB8_243
+; RV32-NEXT:    j .LBB8_482
+; RV32-NEXT:  .LBB8_243: # %else659
+; RV32-NEXT:    slli a4, a2, 2
+; RV32-NEXT:    bgez a4, .LBB8_245
+; RV32-NEXT:  .LBB8_244: # %cond.store661
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 1
+; RV32-NEXT:    addi a5, a5, 384
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 381(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:  .LBB8_245: # %else662
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    slli a4, a2, 1
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    bgez a4, .LBB8_247
+; RV32-NEXT:  # %bb.246: # %cond.store664
+; RV32-NEXT:    li a1, 128
+; RV32-NEXT:    li a4, 17
+; RV32-NEXT:    slli a4, a4, 8
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a4)
+; RV32-NEXT:    lbu a1, 254(a3)
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:    mv a0, a4
+; RV32-NEXT:  .LBB8_247: # %else665
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    bgez a2, .LBB8_249
+; RV32-NEXT:  # %bb.248: # %cond.store667
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a4, 1
+; RV32-NEXT:    addi a4, a4, 128
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a4)
+; RV32-NEXT:    lbu a2, 127(a3)
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a4
+; RV32-NEXT:  .LBB8_249: # %else668
+; RV32-NEXT:    andi a2, a1, 1
+; RV32-NEXT:    beqz a2, .LBB8_251
+; RV32-NEXT:  # %bb.250: # %cond.store670
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a4, 1
+; RV32-NEXT:    add a4, sp, a4
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a4)
+; RV32-NEXT:    lbu a2, 0(a3)
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:  .LBB8_251: # %else671
+; RV32-NEXT:    andi a3, a1, 2
+; RV32-NEXT:    addi a2, sp, 2033
+; RV32-NEXT:    beqz a3, .LBB8_252
+; RV32-NEXT:    j .LBB8_483
+; RV32-NEXT:  .LBB8_252: # %else674
+; RV32-NEXT:    andi a3, a1, 4
+; RV32-NEXT:    beqz a3, .LBB8_253
+; RV32-NEXT:    j .LBB8_484
+; RV32-NEXT:  .LBB8_253: # %else677
+; RV32-NEXT:    andi a3, a1, 8
+; RV32-NEXT:    beqz a3, .LBB8_254
+; RV32-NEXT:    j .LBB8_485
+; RV32-NEXT:  .LBB8_254: # %else680
+; RV32-NEXT:    andi a3, a1, 16
+; RV32-NEXT:    beqz a3, .LBB8_255
+; RV32-NEXT:    j .LBB8_486
+; RV32-NEXT:  .LBB8_255: # %else683
+; RV32-NEXT:    andi a3, a1, 32
+; RV32-NEXT:    beqz a3, .LBB8_256
+; RV32-NEXT:    j .LBB8_487
+; RV32-NEXT:  .LBB8_256: # %else686
+; RV32-NEXT:    andi a3, a1, 64
+; RV32-NEXT:    beqz a3, .LBB8_257
+; RV32-NEXT:    j .LBB8_488
+; RV32-NEXT:  .LBB8_257: # %else689
+; RV32-NEXT:    andi a3, a1, 128
+; RV32-NEXT:    beqz a3, .LBB8_258
+; RV32-NEXT:    j .LBB8_489
+; RV32-NEXT:  .LBB8_258: # %else692
+; RV32-NEXT:    andi a3, a1, 256
+; RV32-NEXT:    beqz a3, .LBB8_259
+; RV32-NEXT:    j .LBB8_490
+; RV32-NEXT:  .LBB8_259: # %else695
+; RV32-NEXT:    andi a3, a1, 512
+; RV32-NEXT:    beqz a3, .LBB8_260
+; RV32-NEXT:    j .LBB8_491
+; RV32-NEXT:  .LBB8_260: # %else698
+; RV32-NEXT:    andi a3, a1, 1024
+; RV32-NEXT:    beqz a3, .LBB8_261
+; RV32-NEXT:    j .LBB8_492
+; RV32-NEXT:  .LBB8_261: # %else701
+; RV32-NEXT:    slli a3, a1, 20
+; RV32-NEXT:    bgez a3, .LBB8_262
+; RV32-NEXT:    j .LBB8_493
+; RV32-NEXT:  .LBB8_262: # %else704
+; RV32-NEXT:    slli a3, a1, 19
+; RV32-NEXT:    bgez a3, .LBB8_263
+; RV32-NEXT:    j .LBB8_494
+; RV32-NEXT:  .LBB8_263: # %else707
+; RV32-NEXT:    slli a3, a1, 18
+; RV32-NEXT:    bgez a3, .LBB8_264
+; RV32-NEXT:    j .LBB8_495
+; RV32-NEXT:  .LBB8_264: # %else710
+; RV32-NEXT:    slli a3, a1, 17
+; RV32-NEXT:    bgez a3, .LBB8_265
+; RV32-NEXT:    j .LBB8_496
+; RV32-NEXT:  .LBB8_265: # %else713
+; RV32-NEXT:    slli a3, a1, 16
+; RV32-NEXT:    bgez a3, .LBB8_266
+; RV32-NEXT:    j .LBB8_497
+; RV32-NEXT:  .LBB8_266: # %else716
+; RV32-NEXT:    slli a3, a1, 15
+; RV32-NEXT:    bgez a3, .LBB8_267
+; RV32-NEXT:    j .LBB8_498
+; RV32-NEXT:  .LBB8_267: # %else719
+; RV32-NEXT:    slli a3, a1, 14
+; RV32-NEXT:    bgez a3, .LBB8_268
+; RV32-NEXT:    j .LBB8_499
+; RV32-NEXT:  .LBB8_268: # %else722
+; RV32-NEXT:    slli a2, a1, 13
+; RV32-NEXT:    bgez a2, .LBB8_269
+; RV32-NEXT:    j .LBB8_500
+; RV32-NEXT:  .LBB8_269: # %else725
+; RV32-NEXT:    slli a2, a1, 12
+; RV32-NEXT:    bgez a2, .LBB8_270
+; RV32-NEXT:    j .LBB8_501
+; RV32-NEXT:  .LBB8_270: # %else728
+; RV32-NEXT:    slli a2, a1, 11
+; RV32-NEXT:    bgez a2, .LBB8_271
+; RV32-NEXT:    j .LBB8_502
+; RV32-NEXT:  .LBB8_271: # %else731
+; RV32-NEXT:    slli a2, a1, 10
+; RV32-NEXT:    bgez a2, .LBB8_272
+; RV32-NEXT:    j .LBB8_503
+; RV32-NEXT:  .LBB8_272: # %else734
+; RV32-NEXT:    slli a2, a1, 9
+; RV32-NEXT:    bgez a2, .LBB8_273
+; RV32-NEXT:    j .LBB8_504
+; RV32-NEXT:  .LBB8_273: # %else737
+; RV32-NEXT:    slli a2, a1, 8
+; RV32-NEXT:    bgez a2, .LBB8_274
+; RV32-NEXT:    j .LBB8_505
+; RV32-NEXT:  .LBB8_274: # %else740
+; RV32-NEXT:    slli a2, a1, 7
+; RV32-NEXT:    bgez a2, .LBB8_275
+; RV32-NEXT:    j .LBB8_506
+; RV32-NEXT:  .LBB8_275: # %else743
+; RV32-NEXT:    slli a2, a1, 6
+; RV32-NEXT:    bgez a2, .LBB8_276
+; RV32-NEXT:    j .LBB8_507
+; RV32-NEXT:  .LBB8_276: # %else746
+; RV32-NEXT:    slli a2, a1, 5
+; RV32-NEXT:    bgez a2, .LBB8_277
+; RV32-NEXT:    j .LBB8_508
+; RV32-NEXT:  .LBB8_277: # %else749
+; RV32-NEXT:    slli a2, a1, 4
+; RV32-NEXT:    bgez a2, .LBB8_278
+; RV32-NEXT:    j .LBB8_509
+; RV32-NEXT:  .LBB8_278: # %else752
+; RV32-NEXT:    slli a2, a1, 3
+; RV32-NEXT:    bgez a2, .LBB8_279
+; RV32-NEXT:    j .LBB8_510
+; RV32-NEXT:  .LBB8_279: # %else755
+; RV32-NEXT:    slli a2, a1, 2
+; RV32-NEXT:    bgez a2, .LBB8_280
+; RV32-NEXT:    j .LBB8_511
+; RV32-NEXT:  .LBB8_280: # %else758
+; RV32-NEXT:    slli a2, a1, 1
+; RV32-NEXT:    bgez a2, .LBB8_281
+; RV32-NEXT:    j .LBB8_512
+; RV32-NEXT:  .LBB8_281: # %else761
+; RV32-NEXT:    bgez a1, .LBB8_283
+; RV32-NEXT:  .LBB8_282: # %cond.store763
+; RV32-NEXT:    li a1, 128
+; RV32-NEXT:    addi a2, sp, 128
+; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a2)
+; RV32-NEXT:    lbu a1, 255(sp)
+; RV32-NEXT:    sb a1, 0(a0)
+; RV32-NEXT:  .LBB8_283: # %else764
+; RV32-NEXT:    lui a0, 6
+; RV32-NEXT:    addi a0, a0, 256
+; RV32-NEXT:    sub sp, s0, a0
+; RV32-NEXT:    lui a0, 6
+; RV32-NEXT:    addi a0, a0, -1776
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    lw ra, 2028(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 2024(sp) # 4-byte Folded Reload
+; RV32-NEXT:    addi sp, sp, 2032
+; RV32-NEXT:    ret
+; RV32-NEXT:  .LBB8_284: # %cond.store
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v16, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a1, a2, 2
+; RV32-NEXT:    bnez a1, .LBB8_285
+; RV32-NEXT:    j .LBB8_2
+; RV32-NEXT:  .LBB8_285: # %cond.store1
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a1, a2, 4
+; RV32-NEXT:    bnez a1, .LBB8_286
+; RV32-NEXT:    j .LBB8_3
+; RV32-NEXT:  .LBB8_286: # %cond.store4
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 2
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a1, a2, 8
+; RV32-NEXT:    bnez a1, .LBB8_287
+; RV32-NEXT:    j .LBB8_4
+; RV32-NEXT:  .LBB8_287: # %cond.store7
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 3
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a1, a2, 16
+; RV32-NEXT:    bnez a1, .LBB8_288
+; RV32-NEXT:    j .LBB8_5
+; RV32-NEXT:  .LBB8_288: # %cond.store10
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 4
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a1, a2, 32
+; RV32-NEXT:    bnez a1, .LBB8_289
+; RV32-NEXT:    j .LBB8_6
+; RV32-NEXT:  .LBB8_289: # %cond.store13
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 5
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a1, a2, 64
+; RV32-NEXT:    bnez a1, .LBB8_290
+; RV32-NEXT:    j .LBB8_7
+; RV32-NEXT:  .LBB8_290: # %cond.store16
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 6
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a1, a2, 128
+; RV32-NEXT:    bnez a1, .LBB8_291
+; RV32-NEXT:    j .LBB8_8
+; RV32-NEXT:  .LBB8_291: # %cond.store19
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 7
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a1, a2, 256
+; RV32-NEXT:    bnez a1, .LBB8_292
+; RV32-NEXT:    j .LBB8_9
+; RV32-NEXT:  .LBB8_292: # %cond.store22
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 8
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a1, a2, 512
+; RV32-NEXT:    bnez a1, .LBB8_293
+; RV32-NEXT:    j .LBB8_10
+; RV32-NEXT:  .LBB8_293: # %cond.store25
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 9
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a1, a2, 1024
+; RV32-NEXT:    bnez a1, .LBB8_294
+; RV32-NEXT:    j .LBB8_11
+; RV32-NEXT:  .LBB8_294: # %cond.store28
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 10
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 20
+; RV32-NEXT:    bltz a1, .LBB8_295
+; RV32-NEXT:    j .LBB8_12
+; RV32-NEXT:  .LBB8_295: # %cond.store31
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 11
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 19
+; RV32-NEXT:    bltz a1, .LBB8_296
+; RV32-NEXT:    j .LBB8_13
+; RV32-NEXT:  .LBB8_296: # %cond.store34
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 12
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 18
+; RV32-NEXT:    bltz a1, .LBB8_297
+; RV32-NEXT:    j .LBB8_14
+; RV32-NEXT:  .LBB8_297: # %cond.store37
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 13
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 17
+; RV32-NEXT:    bltz a1, .LBB8_298
+; RV32-NEXT:    j .LBB8_15
+; RV32-NEXT:  .LBB8_298: # %cond.store40
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 14
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 16
+; RV32-NEXT:    bltz a1, .LBB8_299
+; RV32-NEXT:    j .LBB8_16
+; RV32-NEXT:  .LBB8_299: # %cond.store43
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v16, 15
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 15
+; RV32-NEXT:    bltz a1, .LBB8_300
+; RV32-NEXT:    j .LBB8_17
+; RV32-NEXT:  .LBB8_300: # %cond.store46
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 16
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 14
+; RV32-NEXT:    bltz a1, .LBB8_301
+; RV32-NEXT:    j .LBB8_18
+; RV32-NEXT:  .LBB8_301: # %cond.store49
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 17
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 13
+; RV32-NEXT:    bltz a1, .LBB8_302
+; RV32-NEXT:    j .LBB8_19
+; RV32-NEXT:  .LBB8_302: # %cond.store52
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 18
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 12
+; RV32-NEXT:    bltz a1, .LBB8_303
+; RV32-NEXT:    j .LBB8_20
+; RV32-NEXT:  .LBB8_303: # %cond.store55
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 19
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 11
+; RV32-NEXT:    bltz a1, .LBB8_304
+; RV32-NEXT:    j .LBB8_21
+; RV32-NEXT:  .LBB8_304: # %cond.store58
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 20
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 10
+; RV32-NEXT:    bltz a1, .LBB8_305
+; RV32-NEXT:    j .LBB8_22
+; RV32-NEXT:  .LBB8_305: # %cond.store61
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 21
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 9
+; RV32-NEXT:    bltz a1, .LBB8_306
+; RV32-NEXT:    j .LBB8_23
+; RV32-NEXT:  .LBB8_306: # %cond.store64
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 22
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 8
+; RV32-NEXT:    bltz a1, .LBB8_307
+; RV32-NEXT:    j .LBB8_24
+; RV32-NEXT:  .LBB8_307: # %cond.store67
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 23
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 7
+; RV32-NEXT:    bltz a1, .LBB8_308
+; RV32-NEXT:    j .LBB8_25
+; RV32-NEXT:  .LBB8_308: # %cond.store70
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 24
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 6
+; RV32-NEXT:    bltz a1, .LBB8_309
+; RV32-NEXT:    j .LBB8_26
+; RV32-NEXT:  .LBB8_309: # %cond.store73
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 25
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 5
+; RV32-NEXT:    bltz a1, .LBB8_310
+; RV32-NEXT:    j .LBB8_27
+; RV32-NEXT:  .LBB8_310: # %cond.store76
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 26
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 4
+; RV32-NEXT:    bltz a1, .LBB8_311
+; RV32-NEXT:    j .LBB8_28
+; RV32-NEXT:  .LBB8_311: # %cond.store79
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v16, 27
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a1, a2, 3
+; RV32-NEXT:    bgez a1, .LBB8_513
+; RV32-NEXT:    j .LBB8_29
+; RV32-NEXT:  .LBB8_513: # %cond.store79
+; RV32-NEXT:    j .LBB8_30
+; RV32-NEXT:  .LBB8_312: # %cond.store94
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1016(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a4, a3, 2
+; RV32-NEXT:    bnez a4, .LBB8_313
+; RV32-NEXT:    j .LBB8_38
+; RV32-NEXT:  .LBB8_313: # %cond.store97
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -128
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 889(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a4, a3, 4
+; RV32-NEXT:    bnez a4, .LBB8_314
+; RV32-NEXT:    j .LBB8_39
+; RV32-NEXT:  .LBB8_314: # %cond.store100
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -256
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 762(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a4, a3, 8
+; RV32-NEXT:    bnez a4, .LBB8_315
+; RV32-NEXT:    j .LBB8_40
+; RV32-NEXT:  .LBB8_315: # %cond.store103
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -384
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 635(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a4, a3, 16
+; RV32-NEXT:    bnez a4, .LBB8_316
+; RV32-NEXT:    j .LBB8_41
+; RV32-NEXT:  .LBB8_316: # %cond.store106
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -512
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 508(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a4, a3, 32
+; RV32-NEXT:    bnez a4, .LBB8_317
+; RV32-NEXT:    j .LBB8_42
+; RV32-NEXT:  .LBB8_317: # %cond.store109
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -640
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 381(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a4, a3, 64
+; RV32-NEXT:    bnez a4, .LBB8_318
+; RV32-NEXT:    j .LBB8_43
+; RV32-NEXT:  .LBB8_318: # %cond.store112
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -768
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 254(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a4, a3, 128
+; RV32-NEXT:    bnez a4, .LBB8_319
+; RV32-NEXT:    j .LBB8_44
+; RV32-NEXT:  .LBB8_319: # %cond.store115
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -896
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 127(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a4, a3, 256
+; RV32-NEXT:    beqz a4, .LBB8_514
+; RV32-NEXT:    j .LBB8_45
+; RV32-NEXT:  .LBB8_514: # %cond.store115
+; RV32-NEXT:    j .LBB8_46
+; RV32-NEXT:  .LBB8_320: # %cond.store121
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -1152
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 2032(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    andi a4, a3, 1024
+; RV32-NEXT:    bnez a4, .LBB8_321
+; RV32-NEXT:    j .LBB8_48
+; RV32-NEXT:  .LBB8_321: # %cond.store124
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -1280
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1905(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a4, a3, 20
+; RV32-NEXT:    bltz a4, .LBB8_322
+; RV32-NEXT:    j .LBB8_49
+; RV32-NEXT:  .LBB8_322: # %cond.store127
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -1408
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1778(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a4, a3, 19
+; RV32-NEXT:    bltz a4, .LBB8_323
+; RV32-NEXT:    j .LBB8_50
+; RV32-NEXT:  .LBB8_323: # %cond.store130
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -1536
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1651(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a4, a3, 18
+; RV32-NEXT:    bltz a4, .LBB8_324
+; RV32-NEXT:    j .LBB8_51
+; RV32-NEXT:  .LBB8_324: # %cond.store133
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -1664
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1524(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a4, a3, 17
+; RV32-NEXT:    bltz a4, .LBB8_325
+; RV32-NEXT:    j .LBB8_52
+; RV32-NEXT:  .LBB8_325: # %cond.store136
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -1792
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1397(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    bltz a4, .LBB8_326
+; RV32-NEXT:    j .LBB8_53
+; RV32-NEXT:  .LBB8_326: # %cond.store139
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 6
+; RV32-NEXT:    addi a5, a5, -1920
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1270(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a4, a3, 15
+; RV32-NEXT:    bltz a4, .LBB8_327
+; RV32-NEXT:    j .LBB8_54
+; RV32-NEXT:  .LBB8_327: # %cond.store142
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 11
+; RV32-NEXT:    slli a5, a5, 11
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1143(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a4, a3, 14
+; RV32-NEXT:    bltz a4, .LBB8_328
+; RV32-NEXT:    j .LBB8_55
+; RV32-NEXT:  .LBB8_328: # %cond.store145
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 1920
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1016(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a4, a3, 13
+; RV32-NEXT:    bltz a4, .LBB8_329
+; RV32-NEXT:    j .LBB8_56
+; RV32-NEXT:  .LBB8_329: # %cond.store148
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 1792
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 889(a2)
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    slli a4, a3, 12
+; RV32-NEXT:    bltz a4, .LBB8_330
+; RV32-NEXT:    j .LBB8_57
+; RV32-NEXT:  .LBB8_330: # %cond.store151
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 1664
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 762(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 11
+; RV32-NEXT:    bltz a4, .LBB8_331
+; RV32-NEXT:    j .LBB8_58
+; RV32-NEXT:  .LBB8_331: # %cond.store154
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 1536
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 635(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 10
+; RV32-NEXT:    bltz a4, .LBB8_332
+; RV32-NEXT:    j .LBB8_59
+; RV32-NEXT:  .LBB8_332: # %cond.store157
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 1408
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 508(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 9
+; RV32-NEXT:    bltz a4, .LBB8_333
+; RV32-NEXT:    j .LBB8_60
+; RV32-NEXT:  .LBB8_333: # %cond.store160
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 1280
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 381(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 8
+; RV32-NEXT:    bltz a4, .LBB8_334
+; RV32-NEXT:    j .LBB8_61
+; RV32-NEXT:  .LBB8_334: # %cond.store163
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 1152
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 254(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 7
+; RV32-NEXT:    bltz a4, .LBB8_335
+; RV32-NEXT:    j .LBB8_62
+; RV32-NEXT:  .LBB8_335: # %cond.store166
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 21
+; RV32-NEXT:    slli a5, a5, 10
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 127(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 6
+; RV32-NEXT:    bgez a4, .LBB8_515
+; RV32-NEXT:    j .LBB8_63
+; RV32-NEXT:  .LBB8_515: # %cond.store166
+; RV32-NEXT:    j .LBB8_64
+; RV32-NEXT:  .LBB8_336: # %cond.store172
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 768
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 2032(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a2, a3, 4
+; RV32-NEXT:    bltz a2, .LBB8_337
+; RV32-NEXT:    j .LBB8_66
+; RV32-NEXT:  .LBB8_337: # %cond.store175
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 640
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 1905(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a2, a3, 3
+; RV32-NEXT:    bltz a2, .LBB8_338
+; RV32-NEXT:    j .LBB8_67
+; RV32-NEXT:  .LBB8_338: # %cond.store178
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 512
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 1778(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a2, a3, 2
+; RV32-NEXT:    bgez a2, .LBB8_516
+; RV32-NEXT:    j .LBB8_68
+; RV32-NEXT:  .LBB8_516: # %cond.store178
+; RV32-NEXT:    j .LBB8_69
+; RV32-NEXT:  .LBB8_339: # %cond.store187
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, 128
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 1397(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a2, 1
+; RV32-NEXT:    bnez a3, .LBB8_340
+; RV32-NEXT:    j .LBB8_73
+; RV32-NEXT:  .LBB8_340: # %cond.store190
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 1270(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a2, 2
+; RV32-NEXT:    bnez a3, .LBB8_341
+; RV32-NEXT:    j .LBB8_74
+; RV32-NEXT:  .LBB8_341: # %cond.store193
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -128
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 1143(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a2, 4
+; RV32-NEXT:    bnez a3, .LBB8_342
+; RV32-NEXT:    j .LBB8_75
+; RV32-NEXT:  .LBB8_342: # %cond.store196
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -256
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 1016(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a2, 8
+; RV32-NEXT:    bnez a3, .LBB8_343
+; RV32-NEXT:    j .LBB8_76
+; RV32-NEXT:  .LBB8_343: # %cond.store199
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -384
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 889(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a2, 16
+; RV32-NEXT:    bnez a3, .LBB8_344
+; RV32-NEXT:    j .LBB8_77
+; RV32-NEXT:  .LBB8_344: # %cond.store202
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -512
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 762(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a2, 32
+; RV32-NEXT:    bnez a3, .LBB8_345
+; RV32-NEXT:    j .LBB8_78
+; RV32-NEXT:  .LBB8_345: # %cond.store205
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -640
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 635(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a2, 64
+; RV32-NEXT:    bnez a3, .LBB8_346
+; RV32-NEXT:    j .LBB8_79
+; RV32-NEXT:  .LBB8_346: # %cond.store208
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -768
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 508(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a2, 128
+; RV32-NEXT:    bnez a3, .LBB8_347
+; RV32-NEXT:    j .LBB8_80
+; RV32-NEXT:  .LBB8_347: # %cond.store211
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -896
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 381(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a2, 256
+; RV32-NEXT:    bnez a3, .LBB8_348
+; RV32-NEXT:    j .LBB8_81
+; RV32-NEXT:  .LBB8_348: # %cond.store214
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    li a5, 19
+; RV32-NEXT:    slli a5, a5, 10
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 254(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a2, 512
+; RV32-NEXT:    bnez a3, .LBB8_349
+; RV32-NEXT:    j .LBB8_82
+; RV32-NEXT:  .LBB8_349: # %cond.store217
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -1152
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 127(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a2, 1024
+; RV32-NEXT:    beqz a3, .LBB8_517
+; RV32-NEXT:    j .LBB8_83
+; RV32-NEXT:  .LBB8_517: # %cond.store217
+; RV32-NEXT:    j .LBB8_84
+; RV32-NEXT:  .LBB8_350: # %cond.store223
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -1408
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 2032(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 19
+; RV32-NEXT:    bltz a4, .LBB8_351
+; RV32-NEXT:    j .LBB8_86
+; RV32-NEXT:  .LBB8_351: # %cond.store226
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -1536
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1905(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 18
+; RV32-NEXT:    bltz a4, .LBB8_352
+; RV32-NEXT:    j .LBB8_87
+; RV32-NEXT:  .LBB8_352: # %cond.store229
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -1664
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1778(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 17
+; RV32-NEXT:    bltz a4, .LBB8_353
+; RV32-NEXT:    j .LBB8_88
+; RV32-NEXT:  .LBB8_353: # %cond.store232
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -1792
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1651(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 16
+; RV32-NEXT:    bltz a4, .LBB8_354
+; RV32-NEXT:    j .LBB8_89
+; RV32-NEXT:  .LBB8_354: # %cond.store235
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 5
+; RV32-NEXT:    addi a5, a5, -1920
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1524(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 15
+; RV32-NEXT:    bltz a4, .LBB8_355
+; RV32-NEXT:    j .LBB8_90
+; RV32-NEXT:  .LBB8_355: # %cond.store238
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 9
+; RV32-NEXT:    slli a5, a5, 11
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1397(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 14
+; RV32-NEXT:    bltz a4, .LBB8_356
+; RV32-NEXT:    j .LBB8_91
+; RV32-NEXT:  .LBB8_356: # %cond.store241
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 1920
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1270(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 13
+; RV32-NEXT:    bltz a4, .LBB8_357
+; RV32-NEXT:    j .LBB8_92
+; RV32-NEXT:  .LBB8_357: # %cond.store244
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 1792
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1143(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 12
+; RV32-NEXT:    bltz a4, .LBB8_358
+; RV32-NEXT:    j .LBB8_93
+; RV32-NEXT:  .LBB8_358: # %cond.store247
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 1664
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1016(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 11
+; RV32-NEXT:    bltz a4, .LBB8_359
+; RV32-NEXT:    j .LBB8_94
+; RV32-NEXT:  .LBB8_359: # %cond.store250
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 1536
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 889(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 10
+; RV32-NEXT:    bltz a4, .LBB8_360
+; RV32-NEXT:    j .LBB8_95
+; RV32-NEXT:  .LBB8_360: # %cond.store253
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 1408
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 762(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 9
+; RV32-NEXT:    bltz a4, .LBB8_361
+; RV32-NEXT:    j .LBB8_96
+; RV32-NEXT:  .LBB8_361: # %cond.store256
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 1280
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 635(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 8
+; RV32-NEXT:    bltz a4, .LBB8_362
+; RV32-NEXT:    j .LBB8_97
+; RV32-NEXT:  .LBB8_362: # %cond.store259
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 1152
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 508(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 7
+; RV32-NEXT:    bltz a4, .LBB8_363
+; RV32-NEXT:    j .LBB8_98
+; RV32-NEXT:  .LBB8_363: # %cond.store262
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 17
+; RV32-NEXT:    slli a5, a5, 10
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 381(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 6
+; RV32-NEXT:    bltz a4, .LBB8_364
+; RV32-NEXT:    j .LBB8_99
+; RV32-NEXT:  .LBB8_364: # %cond.store265
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 896
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 254(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 5
+; RV32-NEXT:    bltz a4, .LBB8_365
+; RV32-NEXT:    j .LBB8_100
+; RV32-NEXT:  .LBB8_365: # %cond.store268
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 768
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 127(a3)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a2, 4
+; RV32-NEXT:    bgez a4, .LBB8_518
+; RV32-NEXT:    j .LBB8_101
+; RV32-NEXT:  .LBB8_518: # %cond.store268
+; RV32-NEXT:    j .LBB8_102
+; RV32-NEXT:  .LBB8_366: # %cond.store283
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, 128
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 1651(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a2, a3, 1
+; RV32-NEXT:    bnez a2, .LBB8_367
+; RV32-NEXT:    j .LBB8_110
+; RV32-NEXT:  .LBB8_367: # %cond.store286
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 1524(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a2, a3, 2
+; RV32-NEXT:    bnez a2, .LBB8_368
+; RV32-NEXT:    j .LBB8_111
+; RV32-NEXT:  .LBB8_368: # %cond.store289
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -128
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 1397(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a2, a3, 4
+; RV32-NEXT:    bnez a2, .LBB8_369
+; RV32-NEXT:    j .LBB8_112
+; RV32-NEXT:  .LBB8_369: # %cond.store292
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -256
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 1270(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a2, a3, 8
+; RV32-NEXT:    bnez a2, .LBB8_370
+; RV32-NEXT:    j .LBB8_113
+; RV32-NEXT:  .LBB8_370: # %cond.store295
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -384
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 1143(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a2, a3, 16
+; RV32-NEXT:    bnez a2, .LBB8_371
+; RV32-NEXT:    j .LBB8_114
+; RV32-NEXT:  .LBB8_371: # %cond.store298
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    li a5, 31
+; RV32-NEXT:    slli a5, a5, 9
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 1016(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a2, a3, 32
+; RV32-NEXT:    bnez a2, .LBB8_372
+; RV32-NEXT:    j .LBB8_115
+; RV32-NEXT:  .LBB8_372: # %cond.store301
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -640
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 889(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a2, a3, 64
+; RV32-NEXT:    bnez a2, .LBB8_373
+; RV32-NEXT:    j .LBB8_116
+; RV32-NEXT:  .LBB8_373: # %cond.store304
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -768
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 762(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a2, a3, 128
+; RV32-NEXT:    bnez a2, .LBB8_374
+; RV32-NEXT:    j .LBB8_117
+; RV32-NEXT:  .LBB8_374: # %cond.store307
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -896
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 635(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a2, a3, 256
+; RV32-NEXT:    bnez a2, .LBB8_375
+; RV32-NEXT:    j .LBB8_118
+; RV32-NEXT:  .LBB8_375: # %cond.store310
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    li a5, 15
+; RV32-NEXT:    slli a5, a5, 10
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 508(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a2, a3, 512
+; RV32-NEXT:    bnez a2, .LBB8_376
+; RV32-NEXT:    j .LBB8_119
+; RV32-NEXT:  .LBB8_376: # %cond.store313
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -1152
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 381(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a2, a3, 1024
+; RV32-NEXT:    bnez a2, .LBB8_377
+; RV32-NEXT:    j .LBB8_120
+; RV32-NEXT:  .LBB8_377: # %cond.store316
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -1280
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 254(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a2, a3, 20
+; RV32-NEXT:    bltz a2, .LBB8_378
+; RV32-NEXT:    j .LBB8_121
+; RV32-NEXT:  .LBB8_378: # %cond.store319
+; RV32-NEXT:    li a2, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -1408
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a2, 127(a4)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a2, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a2, a3, 19
+; RV32-NEXT:    bgez a2, .LBB8_519
+; RV32-NEXT:    j .LBB8_122
+; RV32-NEXT:  .LBB8_519: # %cond.store319
+; RV32-NEXT:    j .LBB8_123
+; RV32-NEXT:  .LBB8_379: # %cond.store325
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -1664
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 2032(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 17
+; RV32-NEXT:    bltz a4, .LBB8_380
+; RV32-NEXT:    j .LBB8_125
+; RV32-NEXT:  .LBB8_380: # %cond.store328
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -1792
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1905(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    bltz a4, .LBB8_381
+; RV32-NEXT:    j .LBB8_126
+; RV32-NEXT:  .LBB8_381: # %cond.store331
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 4
+; RV32-NEXT:    addi a5, a5, -1920
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1778(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 15
+; RV32-NEXT:    bltz a4, .LBB8_382
+; RV32-NEXT:    j .LBB8_127
+; RV32-NEXT:  .LBB8_382: # %cond.store334
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 7
+; RV32-NEXT:    slli a5, a5, 11
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1651(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 14
+; RV32-NEXT:    bltz a4, .LBB8_383
+; RV32-NEXT:    j .LBB8_128
+; RV32-NEXT:  .LBB8_383: # %cond.store337
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 1920
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1524(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 13
+; RV32-NEXT:    bltz a4, .LBB8_384
+; RV32-NEXT:    j .LBB8_129
+; RV32-NEXT:  .LBB8_384: # %cond.store340
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 1792
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1397(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 12
+; RV32-NEXT:    bltz a4, .LBB8_385
+; RV32-NEXT:    j .LBB8_130
+; RV32-NEXT:  .LBB8_385: # %cond.store343
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 1664
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1270(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 11
+; RV32-NEXT:    bltz a4, .LBB8_386
+; RV32-NEXT:    j .LBB8_131
+; RV32-NEXT:  .LBB8_386: # %cond.store346
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 27
+; RV32-NEXT:    slli a5, a5, 9
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1143(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 10
+; RV32-NEXT:    bltz a4, .LBB8_387
+; RV32-NEXT:    j .LBB8_132
+; RV32-NEXT:  .LBB8_387: # %cond.store349
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 1408
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 1016(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 9
+; RV32-NEXT:    bltz a4, .LBB8_388
+; RV32-NEXT:    j .LBB8_133
+; RV32-NEXT:  .LBB8_388: # %cond.store352
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 1280
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 889(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 8
+; RV32-NEXT:    bltz a4, .LBB8_389
+; RV32-NEXT:    j .LBB8_134
+; RV32-NEXT:  .LBB8_389: # %cond.store355
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 1152
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 762(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 7
+; RV32-NEXT:    bltz a4, .LBB8_390
+; RV32-NEXT:    j .LBB8_135
+; RV32-NEXT:  .LBB8_390: # %cond.store358
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 13
+; RV32-NEXT:    slli a5, a5, 10
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 635(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 6
+; RV32-NEXT:    bltz a4, .LBB8_391
+; RV32-NEXT:    j .LBB8_136
+; RV32-NEXT:  .LBB8_391: # %cond.store361
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 896
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 508(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 5
+; RV32-NEXT:    bltz a4, .LBB8_392
+; RV32-NEXT:    j .LBB8_137
+; RV32-NEXT:  .LBB8_392: # %cond.store364
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 768
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 381(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 4
+; RV32-NEXT:    bltz a4, .LBB8_393
+; RV32-NEXT:    j .LBB8_138
+; RV32-NEXT:  .LBB8_393: # %cond.store367
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 640
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 254(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 3
+; RV32-NEXT:    bltz a4, .LBB8_394
+; RV32-NEXT:    j .LBB8_139
+; RV32-NEXT:  .LBB8_394: # %cond.store370
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 25
+; RV32-NEXT:    slli a5, a5, 9
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a4, 127(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 2
+; RV32-NEXT:    bgez a4, .LBB8_520
+; RV32-NEXT:    j .LBB8_140
+; RV32-NEXT:  .LBB8_520: # %cond.store370
+; RV32-NEXT:    j .LBB8_141
+; RV32-NEXT:  .LBB8_395: # %cond.store379
+; RV32-NEXT:    li a3, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, 128
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a3, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v16, (a5)
+; RV32-NEXT:    lbu a3, 1874(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a3, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a3, a4, 1
+; RV32-NEXT:    bnez a3, .LBB8_396
+; RV32-NEXT:    j .LBB8_145
+; RV32-NEXT:  .LBB8_396: # %cond.store382
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v24, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    andi a3, a4, 2
+; RV32-NEXT:    bnez a3, .LBB8_397
+; RV32-NEXT:    j .LBB8_146
+; RV32-NEXT:  .LBB8_397: # %cond.store385
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 1
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    andi a3, a4, 4
+; RV32-NEXT:    bnez a3, .LBB8_398
+; RV32-NEXT:    j .LBB8_147
+; RV32-NEXT:  .LBB8_398: # %cond.store388
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 2
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    andi a3, a4, 8
+; RV32-NEXT:    bnez a3, .LBB8_399
+; RV32-NEXT:    j .LBB8_148
+; RV32-NEXT:  .LBB8_399: # %cond.store391
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 3
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    andi a3, a4, 16
+; RV32-NEXT:    bnez a3, .LBB8_400
+; RV32-NEXT:    j .LBB8_149
+; RV32-NEXT:  .LBB8_400: # %cond.store394
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 4
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    andi a3, a4, 32
+; RV32-NEXT:    bnez a3, .LBB8_401
+; RV32-NEXT:    j .LBB8_150
+; RV32-NEXT:  .LBB8_401: # %cond.store397
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 5
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    andi a3, a4, 64
+; RV32-NEXT:    bnez a3, .LBB8_402
+; RV32-NEXT:    j .LBB8_151
+; RV32-NEXT:  .LBB8_402: # %cond.store400
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 6
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    andi a3, a4, 128
+; RV32-NEXT:    bnez a3, .LBB8_403
+; RV32-NEXT:    j .LBB8_152
+; RV32-NEXT:  .LBB8_403: # %cond.store403
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 7
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    andi a3, a4, 256
+; RV32-NEXT:    bnez a3, .LBB8_404
+; RV32-NEXT:    j .LBB8_153
+; RV32-NEXT:  .LBB8_404: # %cond.store406
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 8
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    andi a3, a4, 512
+; RV32-NEXT:    bnez a3, .LBB8_405
+; RV32-NEXT:    j .LBB8_154
+; RV32-NEXT:  .LBB8_405: # %cond.store409
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 9
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    andi a3, a4, 1024
+; RV32-NEXT:    bnez a3, .LBB8_406
+; RV32-NEXT:    j .LBB8_155
+; RV32-NEXT:  .LBB8_406: # %cond.store412
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 10
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 20
+; RV32-NEXT:    bltz a3, .LBB8_407
+; RV32-NEXT:    j .LBB8_156
+; RV32-NEXT:  .LBB8_407: # %cond.store415
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 11
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 19
+; RV32-NEXT:    bltz a3, .LBB8_408
+; RV32-NEXT:    j .LBB8_157
+; RV32-NEXT:  .LBB8_408: # %cond.store418
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 12
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 18
+; RV32-NEXT:    bltz a3, .LBB8_409
+; RV32-NEXT:    j .LBB8_158
+; RV32-NEXT:  .LBB8_409: # %cond.store421
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 13
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 17
+; RV32-NEXT:    bltz a3, .LBB8_410
+; RV32-NEXT:    j .LBB8_159
+; RV32-NEXT:  .LBB8_410: # %cond.store424
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 14
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 16
+; RV32-NEXT:    bltz a3, .LBB8_411
+; RV32-NEXT:    j .LBB8_160
+; RV32-NEXT:  .LBB8_411: # %cond.store427
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v9, v24, 15
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v9, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 15
+; RV32-NEXT:    bltz a3, .LBB8_412
+; RV32-NEXT:    j .LBB8_161
+; RV32-NEXT:  .LBB8_412: # %cond.store430
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 16
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 14
+; RV32-NEXT:    bltz a3, .LBB8_413
+; RV32-NEXT:    j .LBB8_162
+; RV32-NEXT:  .LBB8_413: # %cond.store433
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 17
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 13
+; RV32-NEXT:    bltz a3, .LBB8_414
+; RV32-NEXT:    j .LBB8_163
+; RV32-NEXT:  .LBB8_414: # %cond.store436
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 18
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 12
+; RV32-NEXT:    bltz a3, .LBB8_415
+; RV32-NEXT:    j .LBB8_164
+; RV32-NEXT:  .LBB8_415: # %cond.store439
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 19
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 11
+; RV32-NEXT:    bltz a3, .LBB8_416
+; RV32-NEXT:    j .LBB8_165
+; RV32-NEXT:  .LBB8_416: # %cond.store442
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 20
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 10
+; RV32-NEXT:    bltz a3, .LBB8_417
+; RV32-NEXT:    j .LBB8_166
+; RV32-NEXT:  .LBB8_417: # %cond.store445
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 21
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 9
+; RV32-NEXT:    bltz a3, .LBB8_418
+; RV32-NEXT:    j .LBB8_167
+; RV32-NEXT:  .LBB8_418: # %cond.store448
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 22
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 8
+; RV32-NEXT:    bltz a3, .LBB8_419
+; RV32-NEXT:    j .LBB8_168
+; RV32-NEXT:  .LBB8_419: # %cond.store451
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 23
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 7
+; RV32-NEXT:    bltz a3, .LBB8_420
+; RV32-NEXT:    j .LBB8_169
+; RV32-NEXT:  .LBB8_420: # %cond.store454
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 24
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 6
+; RV32-NEXT:    bltz a3, .LBB8_421
+; RV32-NEXT:    j .LBB8_170
+; RV32-NEXT:  .LBB8_421: # %cond.store457
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 25
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 5
+; RV32-NEXT:    bltz a3, .LBB8_422
+; RV32-NEXT:    j .LBB8_171
+; RV32-NEXT:  .LBB8_422: # %cond.store460
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 26
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 4
+; RV32-NEXT:    bltz a3, .LBB8_423
+; RV32-NEXT:    j .LBB8_172
+; RV32-NEXT:  .LBB8_423: # %cond.store463
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 27
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 3
+; RV32-NEXT:    bltz a3, .LBB8_424
+; RV32-NEXT:    j .LBB8_173
+; RV32-NEXT:  .LBB8_424: # %cond.store466
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 28
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a3, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a3
+; RV32-NEXT:    slli a3, a4, 2
+; RV32-NEXT:    bgez a3, .LBB8_521
+; RV32-NEXT:    j .LBB8_174
+; RV32-NEXT:  .LBB8_521: # %cond.store466
+; RV32-NEXT:    j .LBB8_175
+; RV32-NEXT:  .LBB8_425: # %cond.store475
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v10, v24, 31
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    addi a4, a0, 1
+; RV32-NEXT:    vse8.v v10, (a0)
+; RV32-NEXT:    mv a0, a4
+; RV32-NEXT:    andi a4, a3, 1
+; RV32-NEXT:    bnez a4, .LBB8_426
+; RV32-NEXT:    j .LBB8_179
+; RV32-NEXT:  .LBB8_426: # %cond.store478
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 1651(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a4, a3, 2
+; RV32-NEXT:    bnez a4, .LBB8_427
+; RV32-NEXT:    j .LBB8_180
+; RV32-NEXT:  .LBB8_427: # %cond.store481
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -128
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 1524(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a4, a3, 4
+; RV32-NEXT:    bnez a4, .LBB8_428
+; RV32-NEXT:    j .LBB8_181
+; RV32-NEXT:  .LBB8_428: # %cond.store484
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -256
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 1397(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a4, a3, 8
+; RV32-NEXT:    bnez a4, .LBB8_429
+; RV32-NEXT:    j .LBB8_182
+; RV32-NEXT:  .LBB8_429: # %cond.store487
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -384
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 1270(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a4, a3, 16
+; RV32-NEXT:    bnez a4, .LBB8_430
+; RV32-NEXT:    j .LBB8_183
+; RV32-NEXT:  .LBB8_430: # %cond.store490
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 23
+; RV32-NEXT:    slli a5, a5, 9
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 1143(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a4, a3, 32
+; RV32-NEXT:    bnez a4, .LBB8_431
+; RV32-NEXT:    j .LBB8_184
+; RV32-NEXT:  .LBB8_431: # %cond.store493
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -640
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 1016(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a4, a3, 64
+; RV32-NEXT:    bnez a4, .LBB8_432
+; RV32-NEXT:    j .LBB8_185
+; RV32-NEXT:  .LBB8_432: # %cond.store496
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -768
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 889(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a4, a3, 128
+; RV32-NEXT:    bnez a4, .LBB8_433
+; RV32-NEXT:    j .LBB8_186
+; RV32-NEXT:  .LBB8_433: # %cond.store499
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -896
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 762(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a4, a3, 256
+; RV32-NEXT:    bnez a4, .LBB8_434
+; RV32-NEXT:    j .LBB8_187
+; RV32-NEXT:  .LBB8_434: # %cond.store502
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 11
+; RV32-NEXT:    slli a5, a5, 10
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 635(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a4, a3, 512
+; RV32-NEXT:    bnez a4, .LBB8_435
+; RV32-NEXT:    j .LBB8_188
+; RV32-NEXT:  .LBB8_435: # %cond.store505
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -1152
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 508(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    andi a4, a3, 1024
+; RV32-NEXT:    bnez a4, .LBB8_436
+; RV32-NEXT:    j .LBB8_189
+; RV32-NEXT:  .LBB8_436: # %cond.store508
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -1280
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 381(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 20
+; RV32-NEXT:    bltz a4, .LBB8_437
+; RV32-NEXT:    j .LBB8_190
+; RV32-NEXT:  .LBB8_437: # %cond.store511
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -1408
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 254(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 19
+; RV32-NEXT:    bltz a4, .LBB8_438
+; RV32-NEXT:    j .LBB8_191
+; RV32-NEXT:  .LBB8_438: # %cond.store514
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    li a5, 21
+; RV32-NEXT:    slli a5, a5, 9
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 127(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 18
+; RV32-NEXT:    bgez a4, .LBB8_522
+; RV32-NEXT:    j .LBB8_192
+; RV32-NEXT:  .LBB8_522: # %cond.store514
+; RV32-NEXT:    j .LBB8_193
+; RV32-NEXT:  .LBB8_439: # %cond.store520
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -1792
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 2032(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 16
+; RV32-NEXT:    bltz a4, .LBB8_440
+; RV32-NEXT:    j .LBB8_195
+; RV32-NEXT:  .LBB8_440: # %cond.store523
+; RV32-NEXT:    li a4, 128
+; RV32-NEXT:    lui a5, 3
+; RV32-NEXT:    addi a5, a5, -1920
+; RV32-NEXT:    add a5, sp, a5
+; RV32-NEXT:    vsetvli zero, a4, e8, m8, ta, ma
+; RV32-NEXT:    vse8.v v24, (a5)
+; RV32-NEXT:    lbu a4, 1905(a2)
+; RV32-NEXT:    addi a5, a0, 1
+; RV32-NEXT:    sb a4, 0(a0)
+; RV32-NEXT:    mv a0, a5
+; RV32-NEXT:    slli a4, a3, 15
+; RV32-NEXT:    bltz a4, .LBB
----------------
topperc wrote:

Looks like there's no alignment information, so the scalar stores got split.
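For reference (not part of the patch), a minimal sketch of what alignment information at the call site could look like, assuming the align parameter attribute on the pointer operand is how it would be conveyed for this intrinsic; the v32i8 width and align 16 value are made up for illustration:

  ; hypothetical call with an explicit align attribute on the pointer;
  ; with the attribute omitted the alignment defaults to 1, which is
  ; presumably why the expanded per-element stores end up split
  tail call void @llvm.masked.compressstore.v32i8(<32 x i8> %data, ptr align 16 %p, <32 x i1> %mask)

Whether the scalarization fallback actually consults that attribute here is an assumption on my part, not something the diff shows.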

https://github.com/llvm/llvm-project/pull/83457

