[llvm] 6fdc77e - [RISCV] Don't reduce vslidedown's VL in rotations

Luke Lau via llvm-commits <llvm-commits at lists.llvm.org>
Thu May 25 01:28:01 PDT 2023


Author: Luke Lau
Date: 2023-05-25T09:27:55+01:00
New Revision: 6fdc77e4884ba22453c0042ab6b11cf43499958d

URL: https://github.com/llvm/llvm-project/commit/6fdc77e4884ba22453c0042ab6b11cf43499958d
DIFF: https://github.com/llvm/llvm-project/commit/6fdc77e4884ba22453c0042ab6b11cf43499958d.diff

LOG: [RISCV] Don't reduce vslidedown's VL in rotations

Even though we only need to write to the bottom NumElts - Rotation
elements for the vslidedown.vi, we can save an extra vsetivli toggle if
we just keep the wide VL.
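
For example, in reverse_v2i8 (updated in the tests below), the vslidedown
previously ran under VL=1 and the vslideup under VL=2, costing two vsetivlis:

    vsetivli zero, 1, e8, mf8, ta, ma
    vslidedown.vi v9, v8, 1
    vsetivli zero, 2, e8, mf8, ta, ma
    vslideup.vi v9, v8, 1

With this change both slides run under the wide VL=2 and a single vsetivli
suffices:

    vsetivli zero, 2, e8, mf8, ta, ma
    vslidedown.vi v9, v8, 1
    vslideup.vi v9, v8, 1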

(I may be missing something here: is there a reason why we would want to explicitly keep the vslidedown's VL narrow?)

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D151390

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 0728d7e968b4..3f8a45e173d4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -3945,16 +3945,10 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
 
     SDValue Res = DAG.getUNDEF(ContainerVT);
     if (HiV) {
-      // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
-      // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
-      // causes multiple vsetvlis in some test cases such as lowering
-      // reduce.mul
-      SDValue DownVL = VL;
-      if (LoV)
-        DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
+      // Even though we could use a smaller VL, don't, to avoid a vsetivli
+      // toggle.
       Res = getVSlidedown(DAG, Subtarget, DL, ContainerVT, Res, HiV,
-                          DAG.getConstant(Rotation, DL, XLenVT), TrueMask,
-                          DownVL);
+                          DAG.getConstant(Rotation, DL, XLenVT), TrueMask, VL);
     }
     if (LoV)
       Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, Res, LoV,
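
To make the transform above concrete, here is a minimal standalone C++ sketch
(not LLVM code) of the SLIDEDOWN+SLIDEUP rotation decomposition this function
performs. The concrete values are hypothetical and mirror splice_unary in
fixed-vectors-int-shuffles.ll below (an 8-element vector rotated by 2). It
also shows why running the vslidedown under the wide VL is harmless: the only
extra lanes it writes, those at and above NumElts - Rotation, are exactly the
lanes the vslideup overwrites.

    #include <cstdio>
    #include <vector>

    int main() {
      const int NumElts = 8, Rotation = 2;
      const int InvRotate = NumElts - Rotation; // 6, the vslideup offset
      std::vector<int> Src = {10, 11, 12, 13, 14, 15, 16, 17};
      std::vector<int> Res(NumElts, -1);

      // vslidedown.vi Res, Src, Rotation under the wide VL = NumElts:
      // Res[i] = Src[i + Rotation]. Lanes i >= NumElts - Rotation slide in
      // values from past the end of Src (modelled as 0 here), but those
      // lanes are dead: the vslideup below overwrites exactly them. So a
      // narrower VL buys nothing and costs a vsetivli toggle.
      for (int i = 0; i < NumElts; ++i)
        Res[i] = (i + Rotation < NumElts) ? Src[i + Rotation] : 0;

      // vslideup.vi Res, Src, InvRotate under VL = NumElts:
      // Res[i] = Src[i - InvRotate] for i >= InvRotate; lower lanes kept.
      for (int i = InvRotate; i < NumElts; ++i)
        Res[i] = Src[i - InvRotate];

      for (int v : Res)
        std::printf("%d ", v); // prints: 12 13 14 15 16 17 10 11
      std::printf("\n");
      return 0;
    }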

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll
index 99d7b4963db7..be216376ce2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll
@@ -115,9 +115,8 @@ define <1 x i8> @reverse_v1i8(<1 x i8> %a) {
 define <2 x i8> @reverse_v2i8(<2 x i8> %a) {
 ; CHECK-LABEL: reverse_v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
@@ -205,9 +204,8 @@ define <1 x i16> @reverse_v1i16(<1 x i16> %a) {
 define <2 x i16> @reverse_v2i16(<2 x i16> %a) {
 ; CHECK-LABEL: reverse_v2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
@@ -280,9 +278,8 @@ define <1 x i32> @reverse_v1i32(<1 x i32> %a) {
 define <2 x i32> @reverse_v2i32(<2 x i32> %a) {
 ; CHECK-LABEL: reverse_v2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
@@ -340,9 +337,8 @@ define <1 x i64> @reverse_v1i64(<1 x i64> %a) {
 define <2 x i64> @reverse_v2i64(<2 x i64> %a) {
 ; CHECK-LABEL: reverse_v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
@@ -484,9 +480,8 @@ define <1 x half> @reverse_v1f16(<1 x half> %a) {
 define <2 x half> @reverse_v2f16(<2 x half> %a) {
 ; CHECK-LABEL: reverse_v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
@@ -559,9 +554,8 @@ define <1 x float> @reverse_v1f32(<1 x float> %a) {
 define <2 x float> @reverse_v2f32(<2 x float> %a) {
 ; CHECK-LABEL: reverse_v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
@@ -619,9 +613,8 @@ define <1 x double> @reverse_v1f64(<1 x double> %a) {
 define <2 x double> @reverse_v2f64(<2 x double> %a) {
 ; CHECK-LABEL: reverse_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
index 1c685aaa55b6..4d7d08e14f0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -304,9 +304,8 @@ define <8 x float> @slideup_v8f32(<8 x float> %x) {
 define <8 x float> @splice_unary(<8 x float> %x) {
 ; CHECK-LABEL: splice_unary:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 7, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vslideup.vi v10, v8, 7
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -317,9 +316,8 @@ define <8 x float> @splice_unary(<8 x float> %x) {
 define <8 x double> @splice_unary2(<8 x double> %x) {
 ; CHECK-LABEL: splice_unary2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 2, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v12, v8, 6
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v12, v8, 6
 ; CHECK-NEXT:    vslideup.vi v12, v8, 2
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -330,9 +328,8 @@ define <8 x double> @splice_unary2(<8 x double> %x) {
 define <8 x float> @splice_binary(<8 x float> %x, <8 x float> %y) {
 ; CHECK-LABEL: splice_binary:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 6, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vslideup.vi v8, v10, 6
 ; CHECK-NEXT:    ret
   %s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 9>
@@ -342,9 +339,8 @@ define <8 x float> @splice_binary(<8 x float> %x, <8 x float> %y) {
 define <8 x double> @splice_binary2(<8 x double> %x, <8 x double> %y) {
 ; CHECK-LABEL: splice_binary2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 3, e64, m4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v12, v12, 5
 ; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v12, v12, 5
 ; CHECK-NEXT:    vslideup.vi v12, v8, 3
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index 493316ff3dbc..5f07294c6c30 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -583,9 +583,8 @@ define <8 x i32> @slideup_v8i32(<8 x i32> %x) {
 define <8 x i16> @splice_unary(<8 x i16> %x) {
 ; CHECK-LABEL: splice_unary:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 2
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 2
 ; CHECK-NEXT:    vslideup.vi v9, v8, 6
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
@@ -596,9 +595,8 @@ define <8 x i16> @splice_unary(<8 x i16> %x) {
 define <8 x i32> @splice_unary2(<8 x i32> %x) {
 ; CHECK-LABEL: splice_unary2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 3, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v8, 5
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 5
 ; CHECK-NEXT:    vslideup.vi v10, v8, 3
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
@@ -609,9 +607,8 @@ define <8 x i32> @splice_unary2(<8 x i32> %x) {
 define <8 x i16> @splice_binary(<8 x i16> %x, <8 x i16> %y) {
 ; CHECK-LABEL: splice_binary:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 6, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 2
 ; CHECK-NEXT:    vslideup.vi v8, v9, 6
 ; CHECK-NEXT:    ret
   %s = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 9>
@@ -621,9 +618,8 @@ define <8 x i16> @splice_binary(<8 x i16> %x, <8 x i16> %y) {
 define <8 x i32> @splice_binary2(<8 x i32> %x, <8 x i32> %y) {
 ; CHECK-LABEL: splice_binary2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 3, e32, m2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5
 ; CHECK-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-NEXT:    ret
   %s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12>

diff --git a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
index 36a8c44614e9..bf8aaa41b899 100644
--- a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
@@ -5,9 +5,8 @@
 define <2 x i8> @v2i8(<2 x i8> %a) {
 ; CHECK-LABEL: v2i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
@@ -18,9 +17,8 @@ define <2 x i8> @v2i8(<2 x i8> %a) {
 define <4 x i8> @v2i8_2(<2 x i8> %a, <2 x i8> %b) {
 ; CHECK-LABEL: v2i8_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 1
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
@@ -153,9 +151,8 @@ define <32 x i8> @v16i8_2(<16 x i8> %a, <16 x i8> %b) {
 define <2 x i16> @v2i16(<2 x i16> %a) {
 ; CHECK-LABEL: v2i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
@@ -166,9 +163,8 @@ define <2 x i16> @v2i16(<2 x i16> %a) {
 define <4 x i16> @v2i16_2(<2 x i16> %a, <2 x i16> %b) {
 ; CHECK-LABEL: v2i16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 1
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
@@ -300,9 +296,8 @@ define <32 x i16> @v16i16_2(<16 x i16> %a, <16 x i16> %b) {
 define <2 x i32> @v2i32(<2 x i32> %a) {
 ; CHECK-LABEL: v2i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
@@ -313,9 +308,8 @@ define <2 x i32> @v2i32(<2 x i32> %a) {
 define <4 x i32> @v2i32_2(<2 x i32> %a, < 2 x i32> %b) {
 ; CHECK-LABEL: v2i32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 1
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
@@ -444,9 +438,8 @@ define <32 x i32> @v16i32_2(<16 x i32> %a, <16 x i32> %b) {
 define <2 x i64> @v2i64(<2 x i64> %a) {
 ; CHECK-LABEL: v2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
@@ -457,9 +450,8 @@ define <2 x i64> @v2i64(<2 x i64> %a) {
 define <4 x i64> @v2i64_2(<2 x i64> %a, < 2 x i64> %b) {
 ; CHECK-LABEL: v2i64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 1
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
@@ -531,9 +523,8 @@ define <8 x i64> @v4i64_2(<4 x i64> %a, <4 x i64> %b) {
 define <2 x half> @v2f16(<2 x half> %a) {
 ; CHECK-LABEL: v2f16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
@@ -544,9 +535,8 @@ define <2 x half> @v2f16(<2 x half> %a) {
 define <4 x half> @v2f16_2(<2 x half> %a, <2 x half> %b) {
 ; CHECK-LABEL: v2f16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 1
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
@@ -652,9 +642,8 @@ define <32 x half> @v16f16_2(<16 x half> %a) {
 define <2 x float> @v2f32(<2 x float> %a) {
 ; CHECK-LABEL: v2f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv1r.v v8, v9
 ; CHECK-NEXT:    ret
@@ -665,9 +654,8 @@ define <2 x float> @v2f32(<2 x float> %a) {
 define <4 x float> @v2f32_2(<2 x float> %a, <2 x float> %b) {
 ; CHECK-LABEL: v2f32_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 1
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1
@@ -743,9 +731,8 @@ define <16 x float> @v8f32_2(<8 x float> %a, <8 x float> %b) {
 define <2 x double> @v2f64(<2 x double> %a) {
 ; CHECK-LABEL: v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v9, v8, 1
 ; CHECK-NEXT:    vslideup.vi v9, v8, 1
 ; CHECK-NEXT:    vmv.v.v v8, v9
 ; CHECK-NEXT:    ret
@@ -756,9 +743,8 @@ define <2 x double> @v2f64(<2 x double> %a) {
 define <4 x double> @v2f64_2(<2 x double> %a, < 2 x double> %b) {
 ; CHECK-LABEL: v2f64_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-NEXT:    vslidedown.vi v8, v9, 1
 ; CHECK-NEXT:    vslideup.vi v8, v9, 1

