[llvm] 04f10ab - [RISCV] Add isel patterns to select the vsub_vx intrinsic to vadd.vi if it uses a small enough immediate

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 31 09:27:16 PDT 2021


Author: Craig Topper
Date: 2021-03-31T09:26:41-07:00
New Revision: 04f10ab367b5c547f5de3285890e74146a5949b0

URL: https://github.com/llvm/llvm-project/commit/04f10ab367b5c547f5de3285890e74146a5949b0
DIFF: https://github.com/llvm/llvm-project/commit/04f10ab367b5c547f5de3285890e74146a5949b0.diff

LOG: [RISCV] Add isel patterns to select the vsub_vx intrinsic to vadd.vi if it uses a small enough immediate

Also modify the simm5_plus1 check because computing Imm - 1 is undefined
behavior if Imm happens to be INT64_MIN. I don't think the compiler would
optimize based on that in this usage, but it could trip UBSan or -ftrapv.
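
For illustration only (not part of the patch), a minimal standalone sketch
of why the rewrite is both safe and equivalent; isInt5 below mirrors
llvm::isInt<5> from llvm/Support/MathExtras.h, and oldCheck/newCheck are
hypothetical names for the two predicate bodies:

  #include <cassert>
  #include <cstdint>

  // Mirrors llvm::isInt<5>: does x fit in a signed 5-bit field [-16, 15]?
  static bool isInt5(int64_t x) { return x >= -16 && x <= 15; }

  // Old predicate: computing Imm - 1 overflows (UB) when Imm == INT64_MIN.
  static bool oldCheck(int64_t Imm) { return isInt5(Imm - 1); }

  // New predicate: accepts exactly [-15, 16] with no subtraction at all.
  static bool newCheck(int64_t Imm) {
    return (isInt5(Imm) && Imm != -16) || Imm == 16;
  }

  int main() {
    // The two forms agree everywhere it is defined to ask.
    for (int64_t Imm = -1000; Imm <= 1000; ++Imm)
      assert(oldCheck(Imm) == newCheck(Imm));
    // The new form stays defined at the value that made the old one UB.
    assert(!newCheck(INT64_MIN));
    return 0;
  }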

Reviewed By: HsiangKai, frasercrmck

Differential Revision: https://reviews.llvm.org/D99637

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index 5f51aac6089cf..44208c86faf41 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -67,12 +67,12 @@ def SImm5Plus1AsmOperand : AsmOperandClass {
 }
 
 def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
-                                           [{return isInt<5>(Imm - 1);}]> {
+  [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
   let ParserMatchClass = SImm5Plus1AsmOperand;
   let MCOperandPredicate = [{
     int64_t Imm;
     if (MCOp.evaluateAsConstantImm(Imm))
-      return isInt<5>(Imm - 1);
+      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
     return MCOp.isBareSymbolRef();
   }];
 }
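
(For context: the old and new predicates accept exactly the same immediates,
[-15, 16], since Imm - 1 fits simm5 iff -16 <= Imm - 1 <= 15, i.e.
-15 <= Imm <= 16, and the new form enumerates that range directly. This is
also exactly the set of immediates whose negation fits vadd.vi's simm5
field, because -Imm in [-16, 15] likewise means Imm in [-15, 16], which is
what lets the new vsub patterns below reuse simm5_plus1.)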

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 98e14a83b4da1..c73b4606fc501 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3278,6 +3278,29 @@ defm PseudoVADD        : VPseudoBinaryV_VV_VX_VI;
 defm PseudoVSUB        : VPseudoBinaryV_VV_VX;
 defm PseudoVRSUB       : VPseudoBinaryV_VX_VI;
 
+foreach vti = AllIntegerVectors in {
+  // Match VSUB with a small immediate to vadd.vi by negating the immediate.
+  def : Pat<(vti.Vector (int_riscv_vsub (vti.Vector vti.RegClass:$rs1),
+                                        (vti.Scalar simm5_plus1:$rs2),
+                                        (XLenVT (VLOp GPR:$vl)))),
+            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                              (NegImm simm5_plus1:$rs2),
+                                                              GPR:$vl,
+                                                              vti.SEW)>;
+  def : Pat<(vti.Vector (int_riscv_vsub_mask (vti.Vector vti.RegClass:$merge),
+                                             (vti.Vector vti.RegClass:$rs1),
+                                             (vti.Scalar simm5_plus1:$rs2),
+                                             (vti.Mask V0),
+                                             (XLenVT (VLOp GPR:$vl)))),
+            (!cast<Instruction>("PseudoVADD_VI_"#vti.LMul.MX#"_MASK")
+                                                      vti.RegClass:$merge,
+                                                      vti.RegClass:$rs1,
+                                                      (NegImm simm5_plus1:$rs2),
+                                                      (vti.Mask V0),
+                                                      GPR:$vl,
+                                                      vti.SEW)>;
+}
+
 //===----------------------------------------------------------------------===//
 // 12.2. Vector Widening Integer Add/Subtract
 //===----------------------------------------------------------------------===//
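
(For context: NegImm here is the SDNodeXForm defined in RISCVInstrInfo.td
that replaces the matched constant with its negation at selection time, so
e.g. a vsub intrinsic with immediate 9 selects to PseudoVADD_VI with
immediate -9. The test updates below exercise both signs of the immediate.)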

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
index e8c9cb77155d7..1e67aeeac69da 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv32.ll
@@ -1587,3 +1587,579 @@ entry:
 
   ret <vscale x 16 x i32> %a
 }
+
+define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 -9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i8_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 -9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i8_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 -9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i8_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 -9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i8_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 -9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv32i8_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    i32 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 -9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv64i8_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 -9,
+    i32 %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 -9,
+    <vscale x 64 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i16_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 -9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i16_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 -9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i16_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 -9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i16_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 -9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i16_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 -9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv32i16_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    i32 %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 -9,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i32_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 -9,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i32_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 -9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i32_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 -9,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i32_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 -9,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i32_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    i32 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, 9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 -9,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i32> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
index 0336f23f2b52a..d07fcc6c2411f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-rv64.ll
@@ -1940,3 +1940,707 @@ entry:
 
   ret <vscale x 8 x i64> %a
 }
+
+define <vscale x 1 x i8> @intrinsic_vsub_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i8 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsub_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i8_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i8_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vsub.mask.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i8 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsub_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i8_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i8_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vsub.mask.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i8 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsub_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i8_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i8_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vsub.mask.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i8 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsub_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i8_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i8_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vsub.mask.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i8 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsub_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv32i8_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i8_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vsub.mask.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i8 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsub_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv64i8_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    i8 9,
+    i64 %1)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv64i8_nxv64i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 64 x i8> @llvm.riscv.vsub.mask.nxv64i8.i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8> %1,
+    i8 9,
+    <vscale x 64 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsub_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i16_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i16_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vsub.mask.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i16 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsub_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i16_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i16_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vsub.mask.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i16 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsub_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i16_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i16_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vsub.mask.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i16 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsub_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i16_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i16_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vsub.mask.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i16 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsub_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i16_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i16_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i16> @llvm.riscv.vsub.mask.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i16 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsub_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv32i16_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    i16 9,
+    i64 %1)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv32i16_nxv32i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i16> @llvm.riscv.vsub.mask.nxv32i16.i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16> %1,
+    i16 9,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsub_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i32_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i32_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vsub.mask.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i32 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsub_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i32_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i32_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vsub.mask.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsub_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i32_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i32_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vsub.mask.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsub_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i32_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i32_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vsub.mask.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsub_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv16i32_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    i32 9,
+    i64 %1)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv16i32_nxv16i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i32> @llvm.riscv.vsub.mask.nxv16i32.i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32> %1,
+    i32 9,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vsub_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i64_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v9, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vsub.mask.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 9,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vsub_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv2i64_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv2i64_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v10, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vsub.mask.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vsub_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv4i64_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv4i64_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v12, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vsub.mask.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 9,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vsub_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vsub_vi_nxv8i64_nxv8i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT:    vadd.vi v8, v8, -9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    i64 9,
+    i64 %1)
+
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv8i64_nxv8i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m8,tu,mu
+; CHECK-NEXT:    vadd.vi v8, v16, -9, v0.t
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vsub.mask.nxv8i64.i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64> %1,
+    i64 9,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i64> %a
+}


        

