[llvm] 173332d - [RISCV] Manually emit the best shift for VSCALE lowering to improve codegen.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sat Jul 17 00:52:19 PDT 2021


Author: Craig Topper
Date: 2021-07-17T00:52:07-07:00
New Revision: 173332d175614561770469d237f8c5ba6378a0e7

URL: https://github.com/llvm/llvm-project/commit/173332d175614561770469d237f8c5ba6378a0e7
DIFF: https://github.com/llvm/llvm-project/commit/173332d175614561770469d237f8c5ba6378a0e7.diff

LOG: [RISCV] Manually emit the best shift for VSCALE lowering to improve codegen.

We assume VLENB is a multiple of 8 and previously relied on the shift
pair being optimized to an AND+SHL/SHR, with computeKnownBits then
removing the AND. That doesn't happen if (vlenb >> 3) gets CSEd and
ends up with multiple uses. This patch manually emits the best shift
to work around this.
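
To illustrate the effect (a sketch reconstructed from the test diffs
below, not part of the original message): since vscale = vlenb / 8, a
power-of-2 multiplier C folds into a single net shift of vlenb by
log2(C) - 3, and a multiplier that is a multiple of 8 becomes a plain
multiply by C / 8. For example, on RV64:

    # vscale * 64: previously a shift pair, now a single shift
    #   before:               after:
    #   csrr a0, vlenb        csrr a0, vlenb
    #   srli a0, a0, 3        slli a0, a0, 3
    #   slli a0, a0, 6
    #
    # vscale * 24: previously vlenb >> 3 then a mul by 24, now a
    # direct multiply by 3, which materializes as a shift-and-add:
    #   csrr a0, vlenb
    #   slli a1, a0, 1
    #   add  a0, a1, a0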

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
    llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
    llvm/test/CodeGen/RISCV/rvv/stepvector.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5df79ab76b52..05a1dbedeba8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2163,6 +2163,27 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
     // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
     // vscale as VLENB / 8.
     assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
+    if (isa<ConstantSDNode>(Op.getOperand(0))) {
+      // We assume VLENB is a multiple of 8. We manually choose the best shift
+      // here because SimplifyDemandedBits isn't always able to simplify it.
+      uint64_t Val = Op.getConstantOperandVal(0);
+      if (isPowerOf2_64(Val)) {
+        uint64_t Log2 = Log2_64(Val);
+        if (Log2 < 3)
+          return DAG.getNode(ISD::SRL, DL, VT, VLENB,
+                             DAG.getConstant(3 - Log2, DL, VT));
+        if (Log2 > 3)
+          return DAG.getNode(ISD::SHL, DL, VT, VLENB,
+                             DAG.getConstant(Log2 - 3, DL, VT));
+        return VLENB;
+      }
+      // If the multiplier is a multiple of 8, scale it down to avoid needing
+      // to shift the VLENB value.
+      if ((Val % 8) == 0)
+        return DAG.getNode(ISD::MUL, DL, VT, VLENB,
+                           DAG.getConstant(Val / 8, DL, VT));
+    }
+
     SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
                                  DAG.getConstant(3, DL, VT));
     return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));

diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 630a0511f064..96371d1c2de1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -70,12 +70,11 @@ define fastcc <vscale x 64 x i32> @ret_split_nxv64i32(<vscale x 64 x i32>* %x) {
 ; CHECK-LABEL: ret_split_nxv64i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    srli a2, a2, 3
-; CHECK-NEXT:    slli a3, a2, 6
+; CHECK-NEXT:    slli a3, a2, 3
 ; CHECK-NEXT:    add a4, a1, a3
 ; CHECK-NEXT:    vl8re32.v v8, (a4)
-; CHECK-NEXT:    slli a4, a2, 7
-; CHECK-NEXT:    addi a5, zero, 192
+; CHECK-NEXT:    slli a4, a2, 4
+; CHECK-NEXT:    addi a5, zero, 24
 ; CHECK-NEXT:    mul a2, a2, a5
 ; CHECK-NEXT:    add a5, a1, a4
 ; CHECK-NEXT:    vl8re32.v v16, (a1)
@@ -104,8 +103,7 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(<vscale x 128 x i32>* %x
 ; CHECK-NEXT:    slli a2, a2, 5
 ; CHECK-NEXT:    sub sp, sp, a2
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    srli a2, a2, 3
-; CHECK-NEXT:    slli a6, a2, 6
+; CHECK-NEXT:    slli a6, a2, 3
 ; CHECK-NEXT:    add a4, a1, a6
 ; CHECK-NEXT:    vl8re32.v v8, (a4)
 ; CHECK-NEXT:    csrr a3, vlenb
@@ -114,7 +112,7 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(<vscale x 128 x i32>* %x
 ; CHECK-NEXT:    add a3, sp, a3
 ; CHECK-NEXT:    addi a3, a3, 16
 ; CHECK-NEXT:    vs8r.v v8, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT:    slli a7, a2, 7
+; CHECK-NEXT:    slli a7, a2, 4
 ; CHECK-NEXT:    add a5, a1, a7
 ; CHECK-NEXT:    vl8re32.v v8, (a5)
 ; CHECK-NEXT:    csrr a3, vlenb
@@ -122,7 +120,7 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(<vscale x 128 x i32>* %x
 ; CHECK-NEXT:    add a3, sp, a3
 ; CHECK-NEXT:    addi a3, a3, 16
 ; CHECK-NEXT:    vs8r.v v8, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT:    addi a5, zero, 192
+; CHECK-NEXT:    addi a5, zero, 24
 ; CHECK-NEXT:    mul t1, a2, a5
 ; CHECK-NEXT:    add a3, a1, t1
 ; CHECK-NEXT:    vl8re32.v v8, (a3)
@@ -131,18 +129,18 @@ define fastcc <vscale x 128 x i32> @ret_split_nxv128i32(<vscale x 128 x i32>* %x
 ; CHECK-NEXT:    add a3, sp, a3
 ; CHECK-NEXT:    addi a3, a3, 16
 ; CHECK-NEXT:    vs8r.v v8, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT:    slli t3, a2, 8
+; CHECK-NEXT:    slli t3, a2, 5
 ; CHECK-NEXT:    add a4, a1, t3
 ; CHECK-NEXT:    vl8re32.v v8, (a4)
 ; CHECK-NEXT:    addi a3, sp, 16
 ; CHECK-NEXT:    vs8r.v v8, (a3) # Unknown-size Folded Spill
-; CHECK-NEXT:    addi a4, zero, 320
+; CHECK-NEXT:    addi a4, zero, 40
 ; CHECK-NEXT:    mul a4, a2, a4
 ; CHECK-NEXT:    add t0, a1, a4
-; CHECK-NEXT:    addi a5, zero, 384
+; CHECK-NEXT:    addi a5, zero, 48
 ; CHECK-NEXT:    mul a5, a2, a5
 ; CHECK-NEXT:    add t2, a1, a5
-; CHECK-NEXT:    addi a3, zero, 448
+; CHECK-NEXT:    addi a3, zero, 56
 ; CHECK-NEXT:    mul a2, a2, a3
 ; CHECK-NEXT:    add a3, a1, a2
 ; CHECK-NEXT:    vl8re32.v v8, (a1)

diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
index 81cc4c97e1fd..52a9d2bc590c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -275,11 +275,11 @@ define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_2(<vscale x 16 x i8> %vec, <vsc
 ; CHECK-LABEL: insert_nxv16i8_nxv1i8_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    slli a1, a0, 1
-; CHECK-NEXT:    add a0, a1, a0
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vslideup.vx v8, v10, a1
+; CHECK-NEXT:    srli a1, a0, 3
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    add a1, a0, a1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
+; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 2)
   ret <vscale x 16 x i8> %v
@@ -379,11 +379,11 @@ define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_26(<vscale x 1 x half
 ; CHECK-LABEL: insert_nxv32f16_undef_nxv1f16_26:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    slli a1, a0, 1
-; CHECK-NEXT:    add a0, a1, a0
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vslideup.vx v22, v8, a1
+; CHECK-NEXT:    srli a1, a0, 3
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    add a1, a0, a1
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT:    vslideup.vx v22, v8, a0
 ; CHECK-NEXT:    vmv8r.v v8, v16
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 26)
@@ -443,15 +443,15 @@ define <vscale x 4 x i1> @insert_nxv4i1_nxv1i1_2(<vscale x 4 x i1> %v, <vscale x
 ; CHECK-NEXT:    vmv.v.i v25, 0
 ; CHECK-NEXT:    vmerge.vim v25, v25, 1, v0
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    slli a1, a0, 1
-; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    srli a1, a0, 3
+; CHECK-NEXT:    srli a0, a0, 2
+; CHECK-NEXT:    add a1, a0, a1
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.v.i v26, 0
 ; CHECK-NEXT:    vmv1r.v v0, v8
 ; CHECK-NEXT:    vmerge.vim v26, v26, 1, v0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vslideup.vx v25, v26, a1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
+; CHECK-NEXT:    vslideup.vx v25, v26, a0
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
 ; CHECK-NEXT:    vmsne.vi v0, v25, 0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index d5e8147adebc..91529aec0acd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1284,12 +1284,12 @@ define void @mgather_nxv16i64(<vscale x 8 x i64*> %ptrs0, <vscale x 8 x i64*> %p
 ; RV32-NEXT:    vsetvli a0, zero, e64, m8, tu, mu
 ; RV32-NEXT:    vloxei32.v v16, (zero), v8, v0.t
 ; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    srli a0, a0, 3
-; RV32-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
-; RV32-NEXT:    vslidedown.vx v0, v0, a0
+; RV32-NEXT:    srli a2, a0, 3
+; RV32-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
+; RV32-NEXT:    vslidedown.vx v0, v0, a2
 ; RV32-NEXT:    vsetvli a2, zero, e64, m8, tu, mu
 ; RV32-NEXT:    vloxei32.v v24, (zero), v12, v0.t
-; RV32-NEXT:    slli a0, a0, 6
+; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    vs8r.v v24, (a0)
 ; RV32-NEXT:    vs8r.v v16, (a1)
@@ -1310,14 +1310,14 @@ define void @mgather_nxv16i64(<vscale x 8 x i64*> %ptrs0, <vscale x 8 x i64*> %p
 ; RV64-NEXT:    vsetvli a0, zero, e64, m8, tu, mu
 ; RV64-NEXT:    vloxei64.v v24, (zero), v16, v0.t
 ; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    srli a0, a0, 3
-; RV64-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vslidedown.vx v0, v0, a0
+; RV64-NEXT:    srli a1, a0, 3
+; RV64-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
+; RV64-NEXT:    vslidedown.vx v0, v0, a1
 ; RV64-NEXT:    vsetvli a1, zero, e64, m8, tu, mu
 ; RV64-NEXT:    addi a1, sp, 16
 ; RV64-NEXT:    vl8re8.v v16, (a1) # Unknown-size Folded Reload
 ; RV64-NEXT:    vloxei64.v v8, (zero), v16, v0.t
-; RV64-NEXT:    slli a0, a0, 6
+; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add a0, a2, a0
 ; RV64-NEXT:    vs8r.v v8, (a0)
 ; RV64-NEXT:    vs8r.v v24, (a2)
@@ -2267,32 +2267,33 @@ define <vscale x 32 x i8> @mgather_baseidx_nxv32i8(i8* %base, <vscale x 32 x i8>
 ; RV64-LABEL: mgather_baseidx_nxv32i8:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vmv1r.v v25, v0
-; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT:    vsext.vf8 v16, v8
-; RV64-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
-; RV64-NEXT:    vloxei64.v v12, (a0), v16, v0.t
 ; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    srli a2, a1, 2
+; RV64-NEXT:    vsetvli a3, zero, e8, mf2, ta, mu
+; RV64-NEXT:    vslidedown.vx v26, v0, a2
 ; RV64-NEXT:    srli a1, a1, 3
 ; RV64-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vslidedown.vx v0, v0, a1
+; RV64-NEXT:    vslidedown.vx v0, v26, a1
 ; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
-; RV64-NEXT:    vsext.vf8 v16, v9
-; RV64-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
-; RV64-NEXT:    vloxei64.v v13, (a0), v16, v0.t
-; RV64-NEXT:    slli a2, a1, 1
-; RV64-NEXT:    vsetvli a3, zero, e8, mf2, ta, mu
-; RV64-NEXT:    vslidedown.vx v25, v25, a2
-; RV64-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
-; RV64-NEXT:    vslidedown.vx v0, v25, a1
-; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vsext.vf8 v16, v11
 ; RV64-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
 ; RV64-NEXT:    vloxei64.v v15, (a0), v16, v0.t
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
 ; RV64-NEXT:    vsext.vf8 v16, v10
 ; RV64-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
-; RV64-NEXT:    vmv1r.v v0, v25
+; RV64-NEXT:    vmv1r.v v0, v26
 ; RV64-NEXT:    vloxei64.v v14, (a0), v16, v0.t
+; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v8
+; RV64-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV64-NEXT:    vmv1r.v v0, v25
+; RV64-NEXT:    vloxei64.v v12, (a0), v16, v0.t
+; RV64-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
+; RV64-NEXT:    vslidedown.vx v0, v25, a1
+; RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
+; RV64-NEXT:    vsext.vf8 v16, v9
+; RV64-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV64-NEXT:    vloxei64.v v13, (a0), v16, v0.t
 ; RV64-NEXT:    vmv4r.v v8, v12
 ; RV64-NEXT:    ret
   %ptrs = getelementptr inbounds i8, i8* %base, <vscale x 32 x i8> %idxs

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll
index f5cc13646f70..7de0a3b3a6f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i32.ll
@@ -41,9 +41,8 @@ define i32 @vscale_non_pow2() nounwind {
 ; CHECK-LABEL: vscale_non_pow2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a1, zero, 24
-; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    ret
 entry:
   %0 = call i32 @llvm.vscale.i32()

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
index 0ad9d9a577b5..29498ad5fcfe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
@@ -50,10 +50,9 @@ define i64 @vscale_uimmpow2xlen() nounwind {
 ;
 ; RV32-LABEL: vscale_uimmpow2xlen:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    srli a1, a0, 29
-; RV32-NEXT:    srli a0, a0, 3
-; RV32-NEXT:    slli a0, a0, 6
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a0, a1, 3
+; RV32-NEXT:    srli a1, a1, 29
 ; RV32-NEXT:    ret
 entry:
   %0 = call i64 @llvm.vscale.i64()
@@ -65,17 +64,17 @@ define i64 @vscale_non_pow2() nounwind {
 ; RV64-LABEL: vscale_non_pow2:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    srli a0, a0, 3
-; RV64-NEXT:    addi a1, zero, 24
-; RV64-NEXT:    mul a0, a0, a1
+; RV64-NEXT:    slli a1, a0, 1
+; RV64-NEXT:    add a0, a1, a0
 ; RV64-NEXT:    ret
 ;
 ; RV32-LABEL: vscale_non_pow2:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    srli a1, a0, 3
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a0, a1, 1
+; RV32-NEXT:    add a0, a0, a1
+; RV32-NEXT:    srli a1, a1, 3
 ; RV32-NEXT:    addi a2, zero, 24
-; RV32-NEXT:    mul a0, a1, a2
 ; RV32-NEXT:    mulhu a1, a1, a2
 ; RV32-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
index 46ae06043cd9..ff57696568bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -485,9 +485,8 @@ define <vscale x 16 x i64> @mul_stepvector_nxv16i64() {
 ; CHECK-NEXT:    addi a0, zero, 3
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a1, zero, 24
-; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    slli a1, a0, 1
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    vadd.vx v16, v8, a0
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
index a91ad7653ebe..ec511dfb87b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv32.ll
@@ -428,9 +428,9 @@ define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vmand.mm v25, v0, v25
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vslidedown.vx v0, v25, a0
+; CHECK-NEXT:    srli a2, a0, 3
+; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vslidedown.vx v0, v25, a2
 ; CHECK-NEXT:    vmv1r.v v2, v25
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv.v.i v24, 0
@@ -438,7 +438,7 @@ define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vmerge.vvm v8, v24, v8, v0
 ; CHECK-NEXT:    vs8r.v v8, (a1)
-; CHECK-NEXT:    slli a0, a0, 6
+; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    vs8r.v v16, (a0)
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
index 3ebe925d625a..df67335d554a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp-rv64.ll
@@ -428,9 +428,9 @@ define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16
 ; CHECK-NEXT:    vle1.v v25, (a0)
 ; CHECK-NEXT:    vmand.mm v25, v0, v25
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vslidedown.vx v0, v25, a0
+; CHECK-NEXT:    srli a2, a0, 3
+; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vslidedown.vx v0, v25, a2
 ; CHECK-NEXT:    vmv1r.v v2, v25
 ; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    vmv.v.i v24, 0
@@ -438,7 +438,7 @@ define void @vselect_legalize_regression(<vscale x 16 x double> %a, <vscale x 16
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    vmerge.vvm v8, v24, v8, v0
 ; CHECK-NEXT:    vs8r.v v8, (a1)
-; CHECK-NEXT:    slli a0, a0, 6
+; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    vs8r.v v16, (a0)
 ; CHECK-NEXT:    ret


        

