[llvm] [RISCV] Combine vslidedown_vl with known VL to a smaller LMUL (PR #66267)

via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 13 11:14:13 PDT 2023


llvmbot wrote:



@llvm/pr-subscribers-backend-risc-v
            
<details>
<summary>Changes</summary>
If we know the VL of a vslidedown_vl, we can work out the minimum number of
vector registers it will operate across, and reuse the logic from
extract_vector_elt to perform the slide in a smaller type and so reduce the LMUL.
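
As a rough illustration of that arithmetic (a hedged sketch, not the in-tree
getSmallestVTForIndex helper; the function name and parameters below are
invented for the example):

<pre>
#include <algorithm>
#include <cstdint>

// Given a constant VL, the element width in bits and the minimum VLEN the
// target guarantees (128 for Zvl128b), return the smallest LMUL whose
// register group still covers the first VL elements.
unsigned smallestLMULForVL(std::uint64_t VL, unsigned EltBits,
                           unsigned MinVLenBits) {
  std::uint64_t EltsPerReg = MinVLenBits / EltBits;   // e.g. 128/32 = 4 for e32
  std::uint64_t RegsNeeded = (VL + EltsPerReg - 1) / EltsPerReg;
  std::uint64_t LMUL = 1;
  while (LMUL < RegsNeeded)                           // round up to 1/2/4/8
    LMUL *= 2;
  return static_cast<unsigned>(std::min<std::uint64_t>(LMUL, 8));
}

// Example: VL=5, e32, Zvl128b => 2 registers => LMUL=2, so a vslidedown on
// nxv8i32 (LMUL=4) can instead be performed on an nxv4i32 (LMUL=2) subvector.
</pre>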

One observation from adding this is that the vslide*_vl nodes all take a mask
operand, but currently anything other than vmset_vl will fail to select, since
all the patterns expect true_mask. So we need to create a fresh vmset_vl in the
shrunk mask type instead of taking an extract_subvector of the existing vmset_vl.
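
To make that concrete, here is the mask handling sketched with the same names
the diff below uses (the first form is the rejected alternative, the second is
what the patch actually emits):

<pre>
// Rejected: slicing the existing mask leaves an EXTRACT_SUBVECTOR of a
// VMSET_VL, which none of the vslide*_vl selection patterns accept.
//   SDValue ShrunkMask =
//       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, getMaskTypeFor(*ShrunkVT),
//                   N->getOperand(3), DAG.getVectorIdxConstant(0, DL));

// Used instead: materialize a fresh all-ones mask in the narrower mask type,
// which still matches the true_mask patterns.
SDValue ShrunkMask =
    DAG.getNode(RISCVISD::VMSET_VL, SDLoc(N), getMaskTypeFor(*ShrunkVT),
                N->getOperand(4));
</pre>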

This could maybe also be done in InsertVSETVLI, but I haven't explored it yet.

--

Patch is 308.49 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/66267.diff

14 Files Affected:

- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+34) 
- (modified) llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll (+13-13) 
- (modified) llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll (+3-3) 
- (modified) llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll (+30-24) 
- (modified) llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll (+16-16) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll (+10-10) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll (+20-18) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll (+40-40) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll (+274-227) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll (+220-220) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll (+711-700) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll (+1-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/vector-splice.ll (+24-24) 


<pre>
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a89961dccc70f3d..d16264c8f1f5f7f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -14099,6 +14099,40 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     if (SDValue V = performCONCAT_VECTORSCombine(N, DAG, Subtarget, *this))
       return V;
     break;
+  case RISCVISD::VSLIDEDOWN_VL: {
+    MVT OrigVT = N->getSimpleValueType(0);
+    auto *CVL = dyn_cast<ConstantSDNode>(N->getOperand(4));
+    if (!CVL)
+      break;
+    // We can try and reduce the LMUL that a vslidedown uses if we know what VL
+    // is. For example, if the target has Zvl128b, a vslidedown of e32 with VL=5
+    // is only going to change at most the first 2 registers. So if we were
+    // operating at LMUL=4 (nxv8i32), we can reduce it to LMUL=2(nxv4i32).
+    if (auto ShrunkVT = getSmallestVTForIndex(OrigVT, CVL->getZExtValue(), DL,
+                                              DAG, Subtarget)) {
+      SDValue ShrunkPassthru =
+          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, *ShrunkVT, N->getOperand(0),
+                      DAG.getVectorIdxConstant(0, DL));
+      SDValue ShrunkInVec =
+          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, *ShrunkVT, N->getOperand(1),
+                      DAG.getVectorIdxConstant(0, DL));
+
+      // The only mask ever used in vslide*_vl nodes is vmset_vl, and the only
+      // patterns on vslide*_vl only accept vmset_vl. So create a new vmset
+      // since using an extract_subvector breaks patterns.
+      assert(N->getOperand(3).getOpcode() == RISCVISD::VMSET_VL);
+      SDValue ShrunkMask =
+          DAG.getNode(RISCVISD::VMSET_VL, SDLoc(N), getMaskTypeFor(*ShrunkVT),
+                      N->getOperand(4));
+      SDValue ShrunkSlidedown =
+          DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, *ShrunkVT,
+                      {ShrunkPassthru, ShrunkInVec, N->getOperand(2),
+                       ShrunkMask, N->getOperand(4), N->getOperand(5)});
+      return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, OrigVT, N->getOperand(0),
+                         ShrunkSlidedown, DAG.getVectorIdxConstant(0, DL));
+    }
+    break;
+  }
   case RISCVISD::VFMV_V_F_VL: {
     const MVT VT = N->getSimpleValueType(0);
     SDValue Passthru = N->getOperand(0);
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index 36dfd631b7664fe..e0a8430c9670d05 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -124,7 +124,7 @@ define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
 define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 zeroext %idx) {
 ; CHECK-LABEL: extractelt_nxv8f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -156,7 +156,7 @@ define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
 define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 zeroext %idx) {
 ; CHECK-LABEL: extractelt_nxv16f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -188,7 +188,7 @@ define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
 define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 zeroext %idx) {
 ; CHECK-LABEL: extractelt_nxv32f16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -284,7 +284,7 @@ define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
 define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 zeroext %idx) {
 ; CHECK-LABEL: extractelt_nxv4f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -316,7 +316,7 @@ define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
 define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 zeroext %idx) {
 ; CHECK-LABEL: extractelt_nxv8f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -348,7 +348,7 @@ define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
 define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 zeroext %idx) {
 ; CHECK-LABEL: extractelt_nxv16f32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -401,7 +401,7 @@ define double @extractelt_nxv2f64_0(<vscale x 2 x double> %v) {
 define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv2f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -412,7 +412,7 @@ define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
 define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 zeroext %idx) {
 ; CHECK-LABEL: extractelt_nxv2f64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -433,7 +433,7 @@ define double @extractelt_nxv4f64_0(<vscale x 4 x double> %v) {
 define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv4f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -444,7 +444,7 @@ define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
 define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 zeroext %idx) {
 ; CHECK-LABEL: extractelt_nxv4f64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -465,7 +465,7 @@ define double @extractelt_nxv8f64_0(<vscale x 8 x double> %v) {
 define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv8f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -476,7 +476,7 @@ define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
 define double @extractelt_nxv8f64_idx(<vscale x 8 x double> %v, i32 zeroext %idx) {
 ; CHECK-LABEL: extractelt_nxv8f64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
@@ -663,7 +663,7 @@ define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
 define double @extractelt_nxv16f64_imm(<vscale x 16 x double> %v) {
 ; CHECK-LABEL: extractelt_nxv16f64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
index ba8486780197e48..c6bfac88ccd8ecd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -78,7 +78,7 @@ define i1 @extractelt_nxv16i1(<vscale x 16 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -96,7 +96,7 @@ define i1 @extractelt_nxv32i1(<vscale x 32 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetivli zero, 1, e8, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -114,7 +114,7 @@ define i1 @extractelt_nxv64i1(<vscale x 64 x i8>* %x, i64 %idx) nounwind {
 ; CHECK-NEXT:    vmseq.vi v0, v8, 0
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT:    vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
index fd2f89e26e59809..8b061e20641f357 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -157,7 +157,7 @@ define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
 define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv16i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -189,7 +189,7 @@ define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
 define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv32i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -221,7 +221,7 @@ define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
 define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv64i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -349,7 +349,7 @@ define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
 define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv8i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -381,7 +381,7 @@ define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
 define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv16i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -413,7 +413,7 @@ define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
 define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv32i16_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -509,7 +509,7 @@ define i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
 define i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv4i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -541,7 +541,7 @@ define i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
 define i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv8i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -573,7 +573,7 @@ define i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
 define i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv16i32_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -638,12 +638,13 @@ define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
 define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv2i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vsrl.vx v10, v8, a0
-; CHECK-NEXT:    vmv.x.s a1, v10
 ; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT:    vsrl.vx v8, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 2 x i64> %v, i32 2
   ret i64 %r
@@ -652,10 +653,11 @@ define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
 define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv2i64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
@@ -679,12 +681,13 @@ define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
 define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv4i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vsrl.vx v12, v8, a0
-; CHECK-NEXT:    vmv.x.s a1, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; CHECK-NEXT:    vsrl.vx v8, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 4 x i64> %v, i32 2
   ret i64 %r
@@ -693,10 +696,11 @@ define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
 define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv4i64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetivli zero, 1, e64, m4, ta, ma
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
@@ -720,12 +724,13 @@ define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
 define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
 ; CHECK-LABEL: extractelt_nxv8i64_imm:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 2
-; CHECK-NEXT:    li a0, 32
-; CHECK-NEXT:    vsrl.vx v16, v8, a0
-; CHECK-NEXT:    vmv.x.s a1, v16
 ; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT:    vsrl.vx v8, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
   %r = extractelement <vscale x 8 x i64> %v, i32 2
   ret i64 %r
@@ -734,10 +739,11 @@ define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
 define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 %idx) {
 ; CHECK-LABEL: extractelt_nxv8i64_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetivli zero, 1, e64, m8, ta, ma
 ; CHECK-NEXT:    vsrl.vx v8, v8, a1
 ; CHECK-NEXT:    vmv.x.s a1, v8
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
index f5e5b9e9083b8dc..1c19cf9a864299b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -156,7 +156,7 @@ define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
 define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 zeroext %idx) {
 ; CHECK-LABEL: extractelt_nxv16i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -188,7 +188,7 @@ define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
 define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 zeroext %idx) {
 ; CHECK-LABEL: extractelt_nxv32i8_idx:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m4, ta, ma
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v8, v8, a0
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
@@ -220,7 +220,7 @@ define signext i8 @ext...
<truncated>
</pre>
</details>


https://github.com/llvm/llvm-project/pull/66267

