[llvm] 0a29827 - [RISCV] Add a negative case for strided load matching

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 8 13:34:08 PDT 2023


Author: Philip Reames
Date: 2023-09-08T13:33:52-07:00
New Revision: 0a298277d341da2a46ccba5a5eb4d4aba120a01a

URL: https://github.com/llvm/llvm-project/commit/0a298277d341da2a46ccba5a5eb4d4aba120a01a
DIFF: https://github.com/llvm/llvm-project/commit/0a298277d341da2a46ccba5a5eb4d4aba120a01a.diff

LOG: [RISCV] Add a negative case for strided load matching

This covers the bug identified in review of PR #65777.
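
A minimal sketch of the arithmetic behind the new negative test, assuming the
usual sign-extension of narrow GEP indices and 4-byte i32 elements: viewed as
raw i8 values, the index vector <0, 128, 0, 128> looks like a stride of 128
wrapping modulo 256 (0, 128, 256, 384), but getelementptr sign-extends each i8
index to pointer width before scaling, so the byte offsets are actually
0, -512, 0, -512. That is not a constant stride, so the generated code below
keeps the indexed (vluxei) load and materializes -512 (li a1, -512).

    /* Illustration only; mirrors the test's <4 x i8> index vector. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint8_t raw[4] = {0, 128, 0, 128};
      for (int i = 0; i < 4; ++i) {
        /* Narrow view: i8 arithmetic wraps, so 128*i looks like stride 128. */
        unsigned narrow = (128u * (unsigned)i) & 0xff;      /* 0, 128, 0, 128 */
        /* What GEP does: sign-extend the i8 index (two's complement), then
           scale by sizeof(i32) == 4 to get the byte offset. */
        int64_t byte_off = (int64_t)(int8_t)raw[i] * 4;     /* 0, -512, ...   */
        printf("lane %d: i8 %3u (128*%d mod 256 = %3u) -> byte offset %lld\n",
               i, (unsigned)raw[i], i, narrow, (long long)byte_off);
      }
      return 0;
    }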

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 30790064090c4ff..f7352b4659e5a9b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -13294,13 +13294,87 @@ define <4 x i32> @mgather_unit_stride_load_wide_idx(ptr %base) {
   ret <4 x i32> %v
 }
 
+; This looks like a strided load (at i8), but isn't at index type.
+define <4 x i32> @mgather_narrow_edge_case(ptr %base) {
+; RV32-LABEL: mgather_narrow_edge_case:
+; RV32:       # %bb.0:
+; RV32-NEXT:    li a1, -512
+; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT:    vmv.v.i v0, 5
+; RV32-NEXT:    vmv.v.x v8, a1
+; RV32-NEXT:    vmerge.vim v8, v8, 0, v0
+; RV32-NEXT:    vluxei32.v v8, (a0), v8
+; RV32-NEXT:    ret
+;
+; RV64V-LABEL: mgather_narrow_edge_case:
+; RV64V:       # %bb.0:
+; RV64V-NEXT:    li a1, -512
+; RV64V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64V-NEXT:    vmv.v.x v8, a1
+; RV64V-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; RV64V-NEXT:    vmv.v.i v0, 5
+; RV64V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64V-NEXT:    vmerge.vim v10, v8, 0, v0
+; RV64V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; RV64V-NEXT:    vluxei64.v v8, (a0), v10
+; RV64V-NEXT:    ret
+;
+; RV64ZVE32F-LABEL: mgather_narrow_edge_case:
+; RV64ZVE32F:       # %bb.0:
+; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV64ZVE32F-NEXT:    vmset.m v8
+; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
+; RV64ZVE32F-NEXT:    # implicit-def: $v8
+; RV64ZVE32F-NEXT:    bnez zero, .LBB106_2
+; RV64ZVE32F-NEXT:  # %bb.1: # %cond.load
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vlse32.v v8, (a0), zero
+; RV64ZVE32F-NEXT:  .LBB106_2: # %else
+; RV64ZVE32F-NEXT:    andi a3, a1, 2
+; RV64ZVE32F-NEXT:    addi a2, a0, -512
+; RV64ZVE32F-NEXT:    bnez a3, .LBB106_6
+; RV64ZVE32F-NEXT:  # %bb.3: # %else2
+; RV64ZVE32F-NEXT:    andi a3, a1, 4
+; RV64ZVE32F-NEXT:    bnez a3, .LBB106_7
+; RV64ZVE32F-NEXT:  .LBB106_4: # %else5
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    bnez a1, .LBB106_8
+; RV64ZVE32F-NEXT:  .LBB106_5: # %else8
+; RV64ZVE32F-NEXT:    ret
+; RV64ZVE32F-NEXT:  .LBB106_6: # %cond.load1
+; RV64ZVE32F-NEXT:    lw a3, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a3
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
+; RV64ZVE32F-NEXT:    andi a3, a1, 4
+; RV64ZVE32F-NEXT:    beqz a3, .LBB106_4
+; RV64ZVE32F-NEXT:  .LBB106_7: # %cond.load4
+; RV64ZVE32F-NEXT:    lw a0, 0(a0)
+; RV64ZVE32F-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
+; RV64ZVE32F-NEXT:    andi a1, a1, 8
+; RV64ZVE32F-NEXT:    beqz a1, .LBB106_5
+; RV64ZVE32F-NEXT:  .LBB106_8: # %cond.load7
+; RV64ZVE32F-NEXT:    lw a0, 0(a2)
+; RV64ZVE32F-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; RV64ZVE32F-NEXT:    vmv.s.x v9, a0
+; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
+; RV64ZVE32F-NEXT:    ret
+  %head = insertelement <4 x i1> poison, i1 true, i32 0
+  %allones = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+  %ptrs = getelementptr inbounds i32, ptr %base, <4 x i8>  <i8 0, i8 128, i8 0, i8 128>
+  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> %allones, <4 x i32> poison)
+  ret <4 x i32> %v
+}
+
 
 ; TODO: Recognize as strided load with SEW=32
 define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
 ; RV32-LABEL: mgather_strided_2xSEW:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI106_0)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI106_0)
+; RV32-NEXT:    lui a1, %hi(.LCPI107_0)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI107_0)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT:    vle32.v v10, (a1)
 ; RV32-NEXT:    vluxei32.v v8, (a0), v10
@@ -13308,8 +13382,8 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
 ;
 ; RV64V-LABEL: mgather_strided_2xSEW:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    lui a1, %hi(.LCPI106_0)
-; RV64V-NEXT:    addi a1, a1, %lo(.LCPI106_0)
+; RV64V-NEXT:    lui a1, %hi(.LCPI107_0)
+; RV64V-NEXT:    addi a1, a1, %lo(.LCPI107_0)
 ; RV64V-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV64V-NEXT:    vle64.v v12, (a1)
 ; RV64V-NEXT:    vluxei64.v v8, (a0), v12
@@ -13321,35 +13395,35 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
 ; RV64ZVE32F-NEXT:    vmset.m v8
 ; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB106_9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB107_9
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB106_10
-; RV64ZVE32F-NEXT:  .LBB106_2: # %else2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_10
+; RV64ZVE32F-NEXT:  .LBB107_2: # %else2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB106_11
-; RV64ZVE32F-NEXT:  .LBB106_3: # %else5
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_11
+; RV64ZVE32F-NEXT:  .LBB107_3: # %else5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB106_12
-; RV64ZVE32F-NEXT:  .LBB106_4: # %else8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_12
+; RV64ZVE32F-NEXT:  .LBB107_4: # %else8
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB106_13
-; RV64ZVE32F-NEXT:  .LBB106_5: # %else11
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_13
+; RV64ZVE32F-NEXT:  .LBB107_5: # %else11
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB106_14
-; RV64ZVE32F-NEXT:  .LBB106_6: # %else14
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_14
+; RV64ZVE32F-NEXT:  .LBB107_6: # %else14
 ; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB106_15
-; RV64ZVE32F-NEXT:  .LBB106_7: # %else17
+; RV64ZVE32F-NEXT:    bnez a2, .LBB107_15
+; RV64ZVE32F-NEXT:  .LBB107_7: # %else17
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB106_16
-; RV64ZVE32F-NEXT:  .LBB106_8: # %else20
+; RV64ZVE32F-NEXT:    bnez a1, .LBB107_16
+; RV64ZVE32F-NEXT:  .LBB107_8: # %else20
 ; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB106_9: # %cond.load
+; RV64ZVE32F-NEXT:  .LBB107_9: # %cond.load
 ; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB106_2
-; RV64ZVE32F-NEXT:  .LBB106_10: # %cond.load1
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_2
+; RV64ZVE32F-NEXT:  .LBB107_10: # %cond.load1
 ; RV64ZVE32F-NEXT:    addi a2, a0, 2
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
@@ -13357,48 +13431,48 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB106_3
-; RV64ZVE32F-NEXT:  .LBB106_11: # %cond.load4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_3
+; RV64ZVE32F-NEXT:  .LBB107_11: # %cond.load4
 ; RV64ZVE32F-NEXT:    addi a2, a0, 8
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB106_4
-; RV64ZVE32F-NEXT:  .LBB106_12: # %cond.load7
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_4
+; RV64ZVE32F-NEXT:  .LBB107_12: # %cond.load7
 ; RV64ZVE32F-NEXT:    addi a2, a0, 10
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB106_5
-; RV64ZVE32F-NEXT:  .LBB106_13: # %cond.load10
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_5
+; RV64ZVE32F-NEXT:  .LBB107_13: # %cond.load10
 ; RV64ZVE32F-NEXT:    addi a2, a0, 16
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB106_6
-; RV64ZVE32F-NEXT:  .LBB106_14: # %cond.load13
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_6
+; RV64ZVE32F-NEXT:  .LBB107_14: # %cond.load13
 ; RV64ZVE32F-NEXT:    addi a2, a0, 18
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB106_7
-; RV64ZVE32F-NEXT:  .LBB106_15: # %cond.load16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB107_7
+; RV64ZVE32F-NEXT:  .LBB107_15: # %cond.load16
 ; RV64ZVE32F-NEXT:    addi a2, a0, 24
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB106_8
-; RV64ZVE32F-NEXT:  .LBB106_16: # %cond.load19
+; RV64ZVE32F-NEXT:    beqz a1, .LBB107_8
+; RV64ZVE32F-NEXT:  .LBB107_16: # %cond.load19
 ; RV64ZVE32F-NEXT:    addi a0, a0, 26
 ; RV64ZVE32F-NEXT:    lh a0, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
@@ -13416,8 +13490,8 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
 define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
 ; RV32-LABEL: mgather_gather_2xSEW:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    lui a1, %hi(.LCPI107_0)
-; RV32-NEXT:    addi a1, a1, %lo(.LCPI107_0)
+; RV32-NEXT:    lui a1, %hi(.LCPI108_0)
+; RV32-NEXT:    addi a1, a1, %lo(.LCPI108_0)
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT:    vle32.v v10, (a1)
 ; RV32-NEXT:    vluxei32.v v8, (a0), v10
@@ -13425,8 +13499,8 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
 ;
 ; RV64V-LABEL: mgather_gather_2xSEW:
 ; RV64V:       # %bb.0:
-; RV64V-NEXT:    lui a1, %hi(.LCPI107_0)
-; RV64V-NEXT:    addi a1, a1, %lo(.LCPI107_0)
+; RV64V-NEXT:    lui a1, %hi(.LCPI108_0)
+; RV64V-NEXT:    addi a1, a1, %lo(.LCPI108_0)
 ; RV64V-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV64V-NEXT:    vle64.v v12, (a1)
 ; RV64V-NEXT:    vluxei64.v v8, (a0), v12
@@ -13438,35 +13512,35 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
 ; RV64ZVE32F-NEXT:    vmset.m v8
 ; RV64ZVE32F-NEXT:    vmv.x.s a1, v8
 ; RV64ZVE32F-NEXT:    # implicit-def: $v8
-; RV64ZVE32F-NEXT:    beqz zero, .LBB107_9
+; RV64ZVE32F-NEXT:    beqz zero, .LBB108_9
 ; RV64ZVE32F-NEXT:  # %bb.1: # %else
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_10
-; RV64ZVE32F-NEXT:  .LBB107_2: # %else2
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_10
+; RV64ZVE32F-NEXT:  .LBB108_2: # %else2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_11
-; RV64ZVE32F-NEXT:  .LBB107_3: # %else5
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_11
+; RV64ZVE32F-NEXT:  .LBB108_3: # %else5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_12
-; RV64ZVE32F-NEXT:  .LBB107_4: # %else8
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_12
+; RV64ZVE32F-NEXT:  .LBB108_4: # %else8
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_13
-; RV64ZVE32F-NEXT:  .LBB107_5: # %else11
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_13
+; RV64ZVE32F-NEXT:  .LBB108_5: # %else11
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_14
-; RV64ZVE32F-NEXT:  .LBB107_6: # %else14
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_14
+; RV64ZVE32F-NEXT:  .LBB108_6: # %else14
 ; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    bnez a2, .LBB107_15
-; RV64ZVE32F-NEXT:  .LBB107_7: # %else17
+; RV64ZVE32F-NEXT:    bnez a2, .LBB108_15
+; RV64ZVE32F-NEXT:  .LBB108_7: # %else17
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    bnez a1, .LBB107_16
-; RV64ZVE32F-NEXT:  .LBB107_8: # %else20
+; RV64ZVE32F-NEXT:    bnez a1, .LBB108_16
+; RV64ZVE32F-NEXT:  .LBB108_8: # %else20
 ; RV64ZVE32F-NEXT:    ret
-; RV64ZVE32F-NEXT:  .LBB107_9: # %cond.load
+; RV64ZVE32F-NEXT:  .LBB108_9: # %cond.load
 ; RV64ZVE32F-NEXT:    vlse16.v v8, (a0), zero
 ; RV64ZVE32F-NEXT:    andi a2, a1, 2
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_2
-; RV64ZVE32F-NEXT:  .LBB107_10: # %cond.load1
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_2
+; RV64ZVE32F-NEXT:  .LBB108_10: # %cond.load1
 ; RV64ZVE32F-NEXT:    addi a2, a0, 2
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
@@ -13474,48 +13548,48 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 1
 ; RV64ZVE32F-NEXT:    andi a2, a1, 4
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_3
-; RV64ZVE32F-NEXT:  .LBB107_11: # %cond.load4
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_3
+; RV64ZVE32F-NEXT:  .LBB108_11: # %cond.load4
 ; RV64ZVE32F-NEXT:    addi a2, a0, 4
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 3, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 2
 ; RV64ZVE32F-NEXT:    andi a2, a1, 8
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_4
-; RV64ZVE32F-NEXT:  .LBB107_12: # %cond.load7
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_4
+; RV64ZVE32F-NEXT:  .LBB108_12: # %cond.load7
 ; RV64ZVE32F-NEXT:    addi a2, a0, 6
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 3
 ; RV64ZVE32F-NEXT:    andi a2, a1, 16
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_5
-; RV64ZVE32F-NEXT:  .LBB107_13: # %cond.load10
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_5
+; RV64ZVE32F-NEXT:  .LBB108_13: # %cond.load10
 ; RV64ZVE32F-NEXT:    addi a2, a0, 16
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 5, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 4
 ; RV64ZVE32F-NEXT:    andi a2, a1, 32
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_6
-; RV64ZVE32F-NEXT:  .LBB107_14: # %cond.load13
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_6
+; RV64ZVE32F-NEXT:  .LBB108_14: # %cond.load13
 ; RV64ZVE32F-NEXT:    addi a2, a0, 18
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 6, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 5
 ; RV64ZVE32F-NEXT:    andi a2, a1, 64
-; RV64ZVE32F-NEXT:    beqz a2, .LBB107_7
-; RV64ZVE32F-NEXT:  .LBB107_15: # %cond.load16
+; RV64ZVE32F-NEXT:    beqz a2, .LBB108_7
+; RV64ZVE32F-NEXT:  .LBB108_15: # %cond.load16
 ; RV64ZVE32F-NEXT:    addi a2, a0, 20
 ; RV64ZVE32F-NEXT:    lh a2, 0(a2)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
 ; RV64ZVE32F-NEXT:    vmv.s.x v9, a2
 ; RV64ZVE32F-NEXT:    vslideup.vi v8, v9, 6
 ; RV64ZVE32F-NEXT:    andi a1, a1, -128
-; RV64ZVE32F-NEXT:    beqz a1, .LBB107_8
-; RV64ZVE32F-NEXT:  .LBB107_16: # %cond.load19
+; RV64ZVE32F-NEXT:    beqz a1, .LBB108_8
+; RV64ZVE32F-NEXT:  .LBB108_16: # %cond.load19
 ; RV64ZVE32F-NEXT:    addi a0, a0, 22
 ; RV64ZVE32F-NEXT:    lh a0, 0(a0)
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e16, m1, ta, ma

More information about the llvm-commits mailing list