[llvm] 16aef9e - Revert "[RISCV] Use vadd.vi for tail undisturbed vsub intrinsic with small immediate."

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 5 09:51:33 PDT 2024


Author: Craig Topper
Date: 2024-06-05T09:43:24-07:00
New Revision: 16aef9ec5231407140f283f3fe12f17410227da4

URL: https://github.com/llvm/llvm-project/commit/16aef9ec5231407140f283f3fe12f17410227da4
DIFF: https://github.com/llvm/llvm-project/commit/16aef9ec5231407140f283f3fe12f17410227da4.diff

LOG: Revert "[RISCV] Use vadd.vi for tail undisturbed vsub intrinsic with small immediate."

This reverts commit 1af0778f9c6e1ac0f6e2dfd0e56063cc21c4eea5.

I messed up the tail policy. The pattern matched a vsub intrinsic with a
non-undef merge operand, which requests tail-undisturbed semantics, but it
still emitted the vadd.vi pseudo with the TA_MA (tail agnostic, mask
agnostic) policy, so the tail elements of the merge value were not
preserved.
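
For reference, a minimal sketch of the bug, reconstructed from the removed
test intrinsic_vsub_vi_tu_nxv1i8_nxv1i8_i8 below (the corrected "tu"
lowering at the end is an assumption about what a fixed pattern would
produce, not output from this tree):

    ; %0 is a live merge value, so the tail of the result must be
    ; taken from %0 (tail undisturbed).
    %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
      <vscale x 1 x i8> %0,
      <vscale x 1 x i8> %1,
      i8 9,
      iXLen %2)

    ; Reverted lowering (tail agnostic, so the tail of v8 may be
    ; clobbered instead of preserved from the merge value):
    ;   vsetvli zero, a0, e8, mf8, ta, ma
    ;   vadd.vi v8, v9, -9
    ; A tail-undisturbed lowering would instead need:
    ;   vsetvli zero, a0, e8, mf8, tu, ma
    ;   vadd.vi v8, v9, -9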

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/vsub.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 3af8b65291efc..21c3f1b9c54c6 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6297,16 +6297,15 @@ foreach vti = AllIntegerVectors in {
                                                         (XLenVT timm:$policy))>;
 
     // Match VSUB with a small immediate to vadd.vi by negating the immediate.
-    def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$merge),
+    def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector (undef)),
                                           (vti.Vector vti.RegClass:$rs1),
                                           (vti.Scalar simm5_plus1:$rs2),
                                           VLOpFrag)),
-              (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX)
-                                                      vti.RegClass:$merge,
-                                                      vti.RegClass:$rs1,
-                                                      (NegImm simm5_plus1:$rs2),
-                                                      GPR:$vl,
-                                                      vti.Log2SEW, TA_MA)>;
+              (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) (vti.Vector (IMPLICIT_DEF)),
+                                                                vti.RegClass:$rs1,
+                                                                (NegImm simm5_plus1:$rs2),
+                                                                GPR:$vl,
+                                                                vti.Log2SEW, TA_MA)>;
     def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
                                                (vti.Vector vti.RegClass:$rs1),
                                                (vti.Scalar simm5_plus1:$rs2),

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub.ll b/llvm/test/CodeGen/RISCV/rvv/vsub.ll
index cb6db10fd03a1..4356834b76df2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub.ll
@@ -2144,22 +2144,6 @@ entry:
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vsub_vi_tu_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v9, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i8 9,
-    iXLen %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
 define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
@@ -2193,22 +2177,6 @@ entry:
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vsub_vi_tu_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v9, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i8 9,
-    iXLen %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
 define <vscale x 2 x i8> @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
@@ -2242,22 +2210,6 @@ entry:
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vsub_vi_tu_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v9, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i8 9,
-    iXLen %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
 define <vscale x 4 x i8> @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
@@ -2291,22 +2243,6 @@ entry:
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vsub_vi_tu_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v9, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i8 9,
-    iXLen %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
 define <vscale x 8 x i8> @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
@@ -2340,22 +2276,6 @@ entry:
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vsub_vi_tu_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v10, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i8 9,
-    iXLen %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
 define <vscale x 16 x i8> @intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
@@ -2389,22 +2309,6 @@ entry:
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vsub_vi_tu_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v12, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i8 9,
-    iXLen %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
 define <vscale x 32 x i8> @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
@@ -2438,22 +2342,6 @@ entry:
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vsub_vi_tu_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v16, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i8 9,
-    iXLen %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
 define <vscale x 64 x i8> @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
@@ -2487,22 +2375,6 @@ entry:
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vsub_vi_tu_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v9, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i16 9,
-    iXLen %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
 define <vscale x 1 x i16> @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
@@ -2536,22 +2408,6 @@ entry:
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vsub_vi_tu_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v9, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i16 9,
-    iXLen %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
 define <vscale x 2 x i16> @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
@@ -2585,22 +2441,6 @@ entry:
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vsub_vi_tu_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v9, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i16 9,
-    iXLen %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
 define <vscale x 4 x i16> @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
@@ -2634,22 +2474,6 @@ entry:
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vsub_vi_tu_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v10, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i16 9,
-    iXLen %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
 define <vscale x 8 x i16> @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
@@ -2683,22 +2507,6 @@ entry:
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vsub_vi_tu_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v12, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i16 9,
-    iXLen %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
 define <vscale x 16 x i16> @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
@@ -2732,22 +2540,6 @@ entry:
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vsub_vi_tu_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v16, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i16 9,
-    iXLen %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
 define <vscale x 32 x i16> @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
@@ -2781,22 +2573,6 @@ entry:
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vsub_vi_tu_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v9, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    iXLen %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
 define <vscale x 1 x i32> @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
@@ -2830,22 +2606,6 @@ entry:
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vsub_vi_tu_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v9, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    iXLen %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
 define <vscale x 2 x i32> @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
@@ -2879,22 +2639,6 @@ entry:
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vsub_vi_tu_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v10, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    iXLen %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
 define <vscale x 4 x i32> @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
@@ -2928,22 +2672,6 @@ entry:
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vsub_vi_tu_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v12, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    iXLen %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
 define <vscale x 8 x i32> @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
@@ -2977,22 +2705,6 @@ entry:
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vsub_vi_tu_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v16, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 9,
-    iXLen %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
 define <vscale x 16 x i32> @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
@@ -3026,22 +2738,6 @@ entry:
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vsub_vi_tu_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v9, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    iXLen %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
 define <vscale x 1 x i64> @intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
@@ -3075,22 +2771,6 @@ entry:
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vsub_vi_tu_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v10, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    iXLen %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
 define <vscale x 2 x i64> @intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
@@ -3124,22 +2804,6 @@ entry:
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vsub_vi_tu_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v12, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    iXLen %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
 define <vscale x 4 x i64> @intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
@@ -3173,22 +2837,6 @@ entry:
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vsub_vi_tu_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
-; CHECK-LABEL: intrinsic_vsub_vi_tu_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vadd.vi v8, v16, -9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 9,
-    iXLen %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
 define <vscale x 8 x i64> @intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
