[llvm] 2919ec0 - [RISCV] Remove side effects from vsetvli intrinsics.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 3 13:04:09 PST 2023


Author: Craig Topper
Date: 2023-02-03T13:03:56-08:00
New Revision: 2919ec041f071c5dab4ad17480af697aaf42d707

URL: https://github.com/llvm/llvm-project/commit/2919ec041f071c5dab4ad17480af697aaf42d707
DIFF: https://github.com/llvm/llvm-project/commit/2919ec041f071c5dab4ad17480af697aaf42d707.diff

LOG: [RISCV] Remove side effects from vsetvli intrinsics.

Also delete the *_opt intrinsics, since they are now identical to the
regular intrinsics.

I originally left the side effects in place because of user expectations
about how these intrinsics interact with things like inline assembly or
function calls, or that they wouldn't be hoisted. I think we should look
at other ways to address those concerns.
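
To illustrate the concern, here is a hand-written IR sketch (not part of
this patch; the function name is made up): with the side effects removed,
nothing orders the intrinsic call relative to the inline assembly, so
optimizations may sink, hoist, or delete the call, and the asm cannot
assume VL and VTYPE were just configured.

declare i64 @llvm.riscv.vsetvli.i64(i64, i64, i64)

define i64 @read_vl_csr(i64 %avl) {
  ; Query VL for SEW=e8, LMUL=m1. The intrinsic is now IntrNoMem with
  ; no IntrHasSideEffects, so this call is not ordered with the asm
  ; below and may be moved past it or removed if unused.
  %vl = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 0, i64 0)
  ; Reads the vl CSR directly; it must not rely on the call above
  ; having emitted a vsetvli instruction immediately before it.
  %raw = call i64 asm sideeffect "csrr $0, vl", "=r"()
  %sum = add i64 %vl, %raw
  ret i64 %sum
}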

If I could, I'd rename these intrinsics somehow to distance them from
the vsetvli instruction. In some sense they only query the VL for a
particular SEW and LMUL; they don't guarantee that a vsetvli instruction
will be emitted.
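
Viewed that way, two calls with the same AVL, SEW, and LMUL compute the
same value, so removing IntrHasSideEffects lets ordinary passes fold
them. A minimal sketch of what is now legal (hand-written, not from this
patch; the function name is made up):

declare i64 @llvm.riscv.vsetvli.i64(i64, i64, i64)

define i64 @query_twice(i64 %avl) {
  ; Both calls query VL for SEW=e8, LMUL=m1 with the same AVL, so
  ; EarlyCSE/GVN may now fold %vl1 into %vl0; a call with no uses may
  ; be deleted outright, as the updated vsetvli-intrinsics.ll tests
  ; check.
  %vl0 = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 0, i64 0)
  %vl1 = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 0, i64 0)
  %sum = add i64 %vl0, %vl1
  ret i64 %sum
}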

Fixes https://github.com/llvm/llvm-project/issues/59359

Reviewed By: rogfer01, kito-cheng

Differential Revision: https://reviews.llvm.org/D143220

Added: 
    

Modified: 
    llvm/docs/ReleaseNotes.rst
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Analysis/ValueTracking.cpp
    llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
    llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
    llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll

Removed: 
    


################################################################################
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index a3f02992048a4..8e2e3b0031f39 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -106,6 +106,8 @@ Changes to the RISC-V Backend
 
 * Assembler support for version 1.0.1 of the Zcb extension was added.
 * Zca, Zcf, and Zcd extensions were upgraded to version 1.0.1.
+* vsetvli intrinsics no longer have side effects. They may now be combined,
+  moved, deleted, etc. by optimizations.
 
 Changes to the WebAssembly Backend
 ----------------------------------

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index e9c88f4680763..b140cdbe64375 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -132,32 +132,16 @@ let TargetPrefix = "riscv" in {
                            /* AVL */  [LLVMMatchType<0>,
                            /* VSEW */  LLVMMatchType<0>,
                            /* VLMUL */ LLVMMatchType<0>],
-                                      [IntrNoMem, IntrHasSideEffects,
+                                      [IntrNoMem,
                                        ImmArg<ArgIndex<1>>,
                                        ImmArg<ArgIndex<2>>]>;
   def int_riscv_vsetvlimax : Intrinsic<[llvm_anyint_ty],
                             /* VSEW */ [LLVMMatchType<0>,
                             /* VLMUL */ LLVMMatchType<0>],
-                                      [IntrNoMem, IntrHasSideEffects,
+                                      [IntrNoMem,
                                        ImmArg<ArgIndex<0>>,
                                        ImmArg<ArgIndex<1>>]>;
 
-  // Versions without side effects: better optimizable and usable if only the
-  // returned vector length is important.
-  def int_riscv_vsetvli_opt   : Intrinsic<[llvm_anyint_ty],
-                               /* AVL */  [LLVMMatchType<0>,
-                               /* VSEW */  LLVMMatchType<0>,
-                               /* VLMUL */ LLVMMatchType<0>],
-                                          [IntrNoMem,
-                                           ImmArg<ArgIndex<1>>,
-                                           ImmArg<ArgIndex<2>>]>;
-  def int_riscv_vsetvlimax_opt : Intrinsic<[llvm_anyint_ty],
-                                /* VSEW */ [LLVMMatchType<0>,
-                                /* VLMUL */ LLVMMatchType<0>],
-                                          [IntrNoMem,
-                                           ImmArg<ArgIndex<0>>,
-                                           ImmArg<ArgIndex<1>>]>;
-
   // For unit stride mask load
   // Input: (pointer, vl)
   class RISCVUSMLoad

diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 0e116b78144cd..c86a70c76bc5d 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1735,9 +1735,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
         Known.Zero.setBitsFrom(32);
         break;
       case Intrinsic::riscv_vsetvli:
-      case Intrinsic::riscv_vsetvli_opt:
       case Intrinsic::riscv_vsetvlimax:
-      case Intrinsic::riscv_vsetvlimax_opt:
        // Assume that VL output is <= 65536.
         // TODO: Take SEW and LMUL into account.
         if (BitWidth > 17)

diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 80385ee28a206..7c64474966ebe 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -542,13 +542,10 @@ void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
   unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);
 
   assert((IntNo == Intrinsic::riscv_vsetvli ||
-          IntNo == Intrinsic::riscv_vsetvlimax ||
-          IntNo == Intrinsic::riscv_vsetvli_opt ||
-          IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
+          IntNo == Intrinsic::riscv_vsetvlimax) &&
          "Unexpected vsetvli intrinsic");
 
-  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
-               IntNo == Intrinsic::riscv_vsetvlimax_opt;
+  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax;
   unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);
 
   assert(Node->getNumOperands() == Offset + 2 &&
@@ -1287,8 +1284,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
                                                {Cmp, Mask, VL, MaskSEW}));
       return;
     }
-    case Intrinsic::riscv_vsetvli_opt:
-    case Intrinsic::riscv_vsetvlimax_opt:
+    case Intrinsic::riscv_vsetvli:
+    case Intrinsic::riscv_vsetvlimax:
       return selectVSETVLI(Node);
     }
     break;
@@ -1299,9 +1296,6 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       // By default we do not custom select any intrinsic.
     default:
       break;
-    case Intrinsic::riscv_vsetvli:
-    case Intrinsic::riscv_vsetvlimax:
-      return selectVSETVLI(Node);
     case Intrinsic::riscv_vlseg2:
     case Intrinsic::riscv_vlseg3:
     case Intrinsic::riscv_vlseg4:

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 5ed580f41c10b..2c73f15f51347 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -5515,7 +5515,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
         unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
         SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
         SDValue SETVLMAX = DAG.getTargetConstant(
-            Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
+            Intrinsic::riscv_vsetvlimax, DL, MVT::i32);
         I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
                             LMUL);
       } else {
@@ -5530,7 +5530,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
       SDValue SETVL =
-          DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
+          DAG.getTargetConstant(Intrinsic::riscv_vsetvli, DL, MVT::i32);
       // Using vsetvli instruction to get actually used length which related to
       // the hardware implementation
       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
@@ -10884,8 +10884,6 @@ void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
       break;
     case Intrinsic::riscv_vsetvli:
     case Intrinsic::riscv_vsetvlimax:
-    case Intrinsic::riscv_vsetvli_opt:
-    case Intrinsic::riscv_vsetvlimax_opt:
      // Assume that VL output is <= 65536.
       // TODO: Take SEW and LMUL into account.
       if (BitWidth > 17)

diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
index 348d88ab9ef95..993decccfcbac 100644
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -65,15 +65,13 @@ define void @_Z3foov() {
 ; CHECK-NEXT:    add a1, a1, a2
 ; CHECK-NEXT:    vl2r.v v16, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vle16.v v16, (a0)
-; CHECK-NEXT:    vsetivli zero, 2, e16, m2, ta, mu
 ; CHECK-NEXT:    lui a0, %hi(.L__const._Z3foov.var_40)
 ; CHECK-NEXT:    addi a0, a0, %lo(.L__const._Z3foov.var_40)
 ; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    vsetivli zero, 2, e16, m2, ta, mu
 ; CHECK-NEXT:    lui a0, 1048572
 ; CHECK-NEXT:    addiw a0, a0, 928
 ; CHECK-NEXT:    vmsbc.vx v0, v8, a0
-; CHECK-NEXT:    vsetivli zero, 2, e16, m2, tu, mu
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, sp, a0

diff --git a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
index d276d725cc7b9..5cb3536683677 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mutate-prior-vsetvli-avl.ll
@@ -12,16 +12,14 @@ define dso_local void @test(ptr nocapture noundef %var_99) {
 ; CHECK-NEXT:    addi a1, a1, %lo(.L__const.test.var_45)
 ; CHECK-NEXT:    vsetivli zero, 2, e8, m4, ta, ma
 ; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    li a1, 1
-; CHECK-NEXT:    vmul.vx v12, v8, a1
 ; CHECK-NEXT:    lui a1, %hi(.L__const.test.var_101)
 ; CHECK-NEXT:    addi a1, a1, %lo(.L__const.test.var_101)
-; CHECK-NEXT:    vle8.v v16, (a1)
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 2, e8, m4, ta, ma
-; CHECK-NEXT:    vmv.x.s a1, v12
+; CHECK-NEXT:    vle8.v v12, (a1)
+; CHECK-NEXT:    li a1, 1
+; CHECK-NEXT:    vmul.vx v16, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v16
 ; CHECK-NEXT:    vmsleu.vx v0, v8, a1
-; CHECK-NEXT:    vssra.vv v8, v16, v8
+; CHECK-NEXT:    vssra.vv v8, v12, v8
 ; CHECK-NEXT:    vmerge.vvm v8, v8, v8, v0
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 024e657efbc7e..7937076a80a4b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -838,15 +838,10 @@ fallthrough:
 define <vscale x 2 x i32> @pre_lmul(<vscale x 2 x i32> %x, <vscale x 2 x i32> %y, i1 %cond) nounwind {
 ; CHECK-LABEL: pre_lmul:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    andi a1, a0, 1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    andi a0, a0, 1
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
-; CHECK-NEXT:    beqz a1, .LBB18_2
-; CHECK-NEXT:  # %bb.1: # %if
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT:  .LBB18_2: # %if.end
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    ret
 entry:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index 4c82f5d32ecc6..0aa09d1e34441 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -152,7 +152,7 @@ for.body:                                         ; preds = %entry, %for.body
 define <vscale x 1 x i64> @test7(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: test7:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a1, zero, e64, m1, tu, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -167,7 +167,7 @@ entry:
 define <vscale x 1 x i64> @test8(<vscale x 1 x i64> %a, i64 %b, <vscale x 1 x i1> %mask) nounwind {
 ; CHECK-LABEL: test8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 6, e64, m1, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv.s.x v8, a0
 ; CHECK-NEXT:    ret
 entry:
@@ -198,7 +198,7 @@ entry:
 define <vscale x 1 x double> @test10(<vscale x 1 x double> %a, double %b) nounwind {
 ; CHECK-LABEL: test10:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, tu, ma
+; CHECK-NEXT:    vsetivli zero, 1, e64, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -211,7 +211,7 @@ entry:
 define <vscale x 1 x double> @test11(<vscale x 1 x double> %a, double %b) nounwind {
 ; CHECK-LABEL: test11:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 6, e64, m1, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; CHECK-NEXT:    vfmv.s.f v8, fa0
 ; CHECK-NEXT:    ret
 entry:
@@ -382,7 +382,7 @@ entry:
 define <vscale x 1 x double> @test19(<vscale x 1 x double> %a, double %b) nounwind {
 ; CHECK-LABEL: test19:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetivli zero, 6, e64, m1, tu, ma
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vfmv.s.f v9, fa0
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
index c5fdcd72c8fa2..7335d1c666448 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
@@ -6,143 +6,96 @@
 
 declare iXLen @llvm.riscv.vsetvli.iXLen(iXLen, iXLen, iXLen)
 declare iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen, iXLen)
-declare iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen, iXLen, iXLen)
-declare iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen, iXLen)
 
-define void @test_vsetvli_e8m1(iXLen %avl) nounwind {
+define iXLen @test_vsetvli_e8m1(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e8m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
+  ret iXLen %vl
 }
 
-define void @test_vsetvli_e16mf4(iXLen %avl) nounwind {
+define iXLen @test_vsetvli_e16mf4(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e16mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 1, iXLen 6)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 1, iXLen 6)
+  ret iXLen %vl
 }
 
-define void @test_vsetvli_e64mf8(iXLen %avl) nounwind {
+define iXLen @test_vsetvli_e64mf8(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e64mf8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, mf8, ta, mu
+; CHECK-NEXT:    vsetvli a0, a0, e64, mf8, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 3, iXLen 5)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 3, iXLen 5)
+  ret iXLen %vl
 }
 
-define void @test_vsetvli_e8mf2_zero_avl() nounwind {
+define iXLen @test_vsetvli_e8mf2_zero_avl() nounwind {
 ; CHECK-LABEL: test_vsetvli_e8mf2_zero_avl:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e8, mf2, ta, mu
+; CHECK-NEXT:    vsetivli a0, 0, e8, mf2, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 0, iXLen 7)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 0, iXLen 7)
+  ret iXLen %vl
 }
 
-define void @test_vsetvli_e32mf8_zero_avl() nounwind {
+define iXLen @test_vsetvli_e32mf8_zero_avl() nounwind {
 ; CHECK-LABEL: test_vsetvli_e32mf8_zero_avl:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 0, e16, mf4, ta, mu
+; CHECK-NEXT:    vsetivli a0, 0, e16, mf4, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 1, iXLen 6)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 1, iXLen 6)
+  ret iXLen %vl
 }
 
-define void @test_vsetvlimax_e32m2() nounwind {
+define iXLen @test_vsetvlimax_e32m2() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e32m2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
+  ret iXLen %vl
 }
 
-define void @test_vsetvlimax_e64m4() nounwind {
+define iXLen @test_vsetvlimax_e64m4() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e64m4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 2)
-  ret void
+  %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 2)
+  ret iXLen %vl
 }
 
-define void @test_vsetvlimax_e64m8() nounwind {
+define iXLen @test_vsetvlimax_e64m8() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e64m8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 3)
-  ret void
-}
-
-define iXLen @test_vsetvli_opt_e8m1(iXLen %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e8m1:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e8, m1, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen %avl, iXLen 0, iXLen 0)
+  %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 3)
   ret iXLen %vl
 }
 
 ; Check that we remove the intrinsic if it's unused.
-define void @test_vsetvli_opt_e8m1_nouse(iXLen %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e8m1_nouse:
+define void @test_vsetvli_e8m1_nouse(iXLen %avl) nounwind {
+; CHECK-LABEL: test_vsetvli_e8m1_nouse:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen %avl, iXLen 0, iXLen 0)
+  call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
   ret void
 }
 
-define iXLen @test_vsetvli_opt_e16mf4(iXLen %avl) nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e16mf4:
+define void @test_vsetvlimax_e32m2_nouse() nounwind {
+; CHECK-LABEL: test_vsetvlimax_e32m2_nouse:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen %avl, iXLen 1, iXLen 6)
-  ret iXLen %vl
-}
-
-define iXLen @test_vsetvli_opt_e32mf8_zero_avl() nounwind {
-; CHECK-LABEL: test_vsetvli_opt_e32mf8_zero_avl:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli a0, 0, e16, mf4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call iXLen @llvm.riscv.vsetvli.opt.iXLen(iXLen 0, iXLen 1, iXLen 6)
-  ret iXLen %vl
-}
-
-define iXLen @test_vsetvlimax_opt_e32m2() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e32m2:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; CHECK-NEXT:    ret
-  %vl = call iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen 2, iXLen 1)
-  ret iXLen %vl
-}
-
-define void @test_vsetvlimax_opt_e32m2_nouse() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e32m2_nouse:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    ret
-  call iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen 2, iXLen 1)
+  call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
   ret void
 }
 
-define iXLen @test_vsetvlimax_opt_e64m4() nounwind {
-; CHECK-LABEL: test_vsetvlimax_opt_e64m4:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    ret
-  %vl = call iXLen @llvm.riscv.vsetvlimax.opt.iXLen(iXLen 3, iXLen 2)
-  ret iXLen %vl
-}
-
 declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32.iXLen(<vscale x 4 x i32>, <vscale x 4 x i32>*, iXLen)
 
 ; Check that we remove the redundant vsetvli when followed by another operation

diff --git a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
index a1eea5bcc2dd0..51f78688b13ed 100644
--- a/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
+++ b/llvm/test/Transforms/InstCombine/RISCV/riscv-vsetvli-knownbits.ll
@@ -5,10 +5,6 @@ declare i32 @llvm.riscv.vsetvli.i32(i32, i32, i32)
 declare i64 @llvm.riscv.vsetvli.i64(i64, i64, i64)
 declare i32 @llvm.riscv.vsetvlimax.i32(i32, i32)
 declare i64 @llvm.riscv.vsetvlimax.i64(i64, i64)
-declare i32 @llvm.riscv.vsetvli.opt.i32(i32, i32, i32)
-declare i64 @llvm.riscv.vsetvli.opt.i64(i64, i64, i64)
-declare i32 @llvm.riscv.vsetvlimax.opt.i32(i32, i32)
-declare i64 @llvm.riscv.vsetvlimax.opt.i64(i64, i64)
 
 define i32 @vsetvli_i32() nounwind {
 ; CHECK-LABEL: @vsetvli_i32(
@@ -133,127 +129,3 @@ entry:
   %1 = and i64 %0, 131071
   ret i64 %1
 }
-
-define i32 @vsetvli_opt_i32() nounwind {
-; CHECK-LABEL: @vsetvli_opt_i32(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
-; CHECK-NEXT:    ret i32 [[TMP0]]
-;
-entry:
-  %0 = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
-  %1 = and i32 %0, 2147483647
-  ret i32 %1
-}
-
-define i64 @vsetvli_opt_sext_i64() nounwind {
-; CHECK-LABEL: @vsetvli_opt_sext_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-  %1 = trunc i64 %0 to i32
-  %2 = sext i32 %1 to i64
-  ret i64 %2
-}
-
-define i64 @vsetvli_opt_zext_i64() nounwind {
-; CHECK-LABEL: @vsetvli_opt_zext_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-  %1 = trunc i64 %0 to i32
-  %2 = zext i32 %1 to i64
-  ret i64 %2
-}
-
-define i32 @vsetvli_opt_and17_i32() nounwind {
-; CHECK-LABEL: @vsetvli_opt_and17_i32(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
-; CHECK-NEXT:    ret i32 [[TMP0]]
-;
-entry:
-  %0 = call i32 @llvm.riscv.vsetvli.opt.i32(i32 1, i32 1, i32 1)
-  %1 = and i32 %0, 131071
-  ret i32 %1
-}
-
-define i64 @vsetvli_opt_and17_i64() nounwind {
-; CHECK-LABEL: @vsetvli_opt_and17_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvli.opt.i64(i64 1, i64 1, i64 1)
-  %1 = and i64 %0, 131071
-  ret i64 %1
-}
-
-define i32 @vsetvlimax_opt_i32() nounwind {
-; CHECK-LABEL: @vsetvlimax_opt_i32(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
-; CHECK-NEXT:    ret i32 [[TMP0]]
-;
-entry:
-  %0 = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
-  %1 = and i32 %0, 2147483647
-  ret i32 %1
-}
-
-define i64 @vsetvlimax_opt_sext_i64() nounwind {
-; CHECK-LABEL: @vsetvlimax_opt_sext_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-  %1 = trunc i64 %0 to i32
-  %2 = sext i32 %1 to i64
-  ret i64 %2
-}
-
-define i64 @vsetvlimax_opt_zext_i64() nounwind {
-; CHECK-LABEL: @vsetvlimax_opt_zext_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-  %1 = trunc i64 %0 to i32
-  %2 = zext i32 %1 to i64
-  ret i64 %2
-}
-
-define i32 @vsetvlimax_opt_and17_i32() nounwind {
-; CHECK-LABEL: @vsetvlimax_opt_and17_i32(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
-; CHECK-NEXT:    ret i32 [[TMP0]]
-;
-entry:
-  %0 = call i32 @llvm.riscv.vsetvlimax.opt.i32(i32 1, i32 1)
-  %1 = and i32 %0, 131071
-  ret i32 %1
-}
-
-define i64 @vsetvlimax_opt_and17_i64() nounwind {
-; CHECK-LABEL: @vsetvlimax_opt_and17_i64(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-; CHECK-NEXT:    ret i64 [[TMP0]]
-;
-entry:
-  %0 = call i64 @llvm.riscv.vsetvlimax.opt.i64(i64 1, i64 1)
-  %1 = and i64 %0, 131071
-  ret i64 %1
-}


        

