[llvm] 09468a9 - [RISCV] Sign extend constant arguments to V intrinsics when promoting to XLen.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 18 11:44:26 PST 2020


Author: Craig Topper
Date: 2020-12-18T11:43:38-08:00
New Revision: 09468a914827b33fe6fe1fcf05ba87c60709d49d

URL: https://github.com/llvm/llvm-project/commit/09468a914827b33fe6fe1fcf05ba87c60709d49d
DIFF: https://github.com/llvm/llvm-project/commit/09468a914827b33fe6fe1fcf05ba87c60709d49d.diff

LOG: [RISCV] Sign extend constant arguments to V intrinsics when promoting to XLen.

The default behavior for an any_extend of a constant is to zero extend.
That folding happens inside getNode rather than leaving the constant to
type legalization, which would sign extend it. By using sign extend with
getNode, the constant is sign extended instead. This gives isel a better
chance of finding a simm5 immediate, since all XLen bits are examined
there.
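
Below is a minimal standalone sketch (plain C++, not LLVM code) of why the
extension kind matters here: the .vi forms take a 5-bit signed immediate
(simm5, range [-16, 15]) and isel examines all XLen bits. Sign extending the
i8 constant -9 leaves it at -9, which fits; folding an any_extend into a zero
extend turns it into 247, which does not.

  #include <cstdint>
  #include <cstdio>

  static bool isSImm5(int64_t V) { return V >= -16 && V <= 15; }

  int main() {
    int8_t C = -9;
    int64_t SExt = C;                        // sign extend: -9
    int64_t ZExt = static_cast<uint8_t>(C);  // zero extend: 247
    std::printf("sign extend: %lld  simm5? %d\n", (long long)SExt, isSImm5(SExt));
    std::printf("zero extend: %lld  simm5? %d\n", (long long)ZExt, isSImm5(ZExt));
  }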

For instructions that use a uimm5 immediate, this change only affects
constants >= 128 for i8 or >= 32768 for i16. Constants that large
already wouldn't have been eligible for uimm5 and would need to use a
scalar register.
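
As a quick sanity check of that claim, here is a plain C++ loop (not LLVM
code) showing that, for i8, sign and zero extension only disagree when the
top bit is set, i.e. for unsigned values >= 128, all of which are already
above the uimm5 maximum of 31.

  #include <cassert>
  #include <cstdint>

  int main() {
    for (unsigned V = 0; V < 256; ++V) {
      int64_t SExt = static_cast<int8_t>(V);
      int64_t ZExt = static_cast<uint8_t>(V);
      if (SExt != ZExt) {
        assert(V >= 128); // disagreement implies the top bit is set...
        assert(V > 31);   // ...so the constant was never a uimm5 candidate
      }
    }
    return 0;
  }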

If the instruction isn't able to use simm5 or the immediate is
too large, we'll need to materialize the immediate in a register.
As far as I know, constants with all 1s in the upper bits should
materialize as well as or better than constants with all 0s.
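
To illustrate (a hedged comparison in plain C++, not LLVM code; the exact
instruction sequences are up to the constant materializer): for the i16
constant -9, the sign-extended XLen value still fits the 12-bit signed
immediate of a single ADDI, while the zero-extended value 65527 does not and
would need an extra instruction such as LUI.

  #include <cstdint>
  #include <cstdio>

  static bool fitsSImm12(int64_t V) { return V >= -2048 && V <= 2047; }

  int main() {
    int16_t C = -9;
    int64_t SExt = C;                         // -9: upper bits all 1s
    int64_t ZExt = static_cast<uint16_t>(C);  // 65527: upper bits all 0s
    std::printf("sext %lld  single addi? %d\n", (long long)SExt, fitsSImm12(SExt));
    std::printf("zext %lld  single addi? %d\n", (long long)ZExt, fitsSImm12(ZExt));
  }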

Longer term, we should probably have a SEW-aware PatFrag that ignores
the bits above SEW before checking simm5.
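
A rough sketch of what such a check could do, with the predicate name
invented for illustration (this is not an existing LLVM PatFrag): sign extend
the immediate from the element width (SEW) first, so that the bits above SEW
are ignored. With this, the zero-extended i8 value 247 at SEW=8 folds back to
-9 and is accepted.

  #include <cstdint>

  // Arithmetic sign extension from the low Bits bits of X (1 <= Bits <= 64).
  static int64_t signExtendFrom(uint64_t X, unsigned Bits) {
    return static_cast<int64_t>(X << (64 - Bits)) >> (64 - Bits);
  }

  // Hypothetical SEW-aware simm5 check (illustrative name, not LLVM API).
  static bool isSImm5IgnoringBitsAboveSEW(uint64_t Imm, unsigned SEW) {
    int64_t V = signExtendFrom(Imm, SEW);
    return V >= -16 && V <= 15;
  }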

I updated about half the test cases in some tests to use a negative
constant to get coverage for this.

Reviewed By: evandro

Differential Revision: https://reviews.llvm.org/D93487

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index a484669e7a0b..5334666baf22 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1045,8 +1045,13 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
         EVT OpVT = ScalarOp.getValueType();
         if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
             (OpVT == MVT::i32 && Subtarget.is64Bit())) {
-          ScalarOp =
-              DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
+          // If the operand is a constant, sign extend to increase our chances
+          // of being able to use a .vi instruction. ANY_EXTEND would become
+          // a zero extend and the simm5 check in isel would fail.
+          // FIXME: Should we ignore the upper bits in isel instead?
+          unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
+                                                          : ISD::ANY_EXTEND;
+          ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
           return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                              Operands);
         }
@@ -1087,9 +1092,15 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
         EVT OpVT = ScalarOp.getValueType();
         if (OpVT == MVT::i8 || OpVT == MVT::i16 ||
             (OpVT == MVT::i32 && Subtarget.is64Bit())) {
-          ScalarOp =
-              DAG.getNode(ISD::ANY_EXTEND, DL, Subtarget.getXLenVT(), ScalarOp);
-          return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(), Operands);
+          // If the operand is a constant, sign extend to increase our chances
+          // of being able to use a .vi instruction. ANY_EXTEND would become
+          // a zero extend and the simm5 check in isel would fail.
+          // FIXME: Should we ignore the upper bits in isel instead?
+          unsigned ExtOpc = isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND
+                                                          : ISD::ANY_EXTEND;
+          ScalarOp = DAG.getNode(ExtOpc, DL, Subtarget.getXLenVT(), ScalarOp);
+          return DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL, Op->getVTList(),
+                             Operands);
         }
       }
     }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
index 0557e58946ce..b4861b3c3e0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
@@ -724,10 +724,10 @@ define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
     <vscale x 1 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 1 x i1> %1,
     i32 %2)
 
@@ -752,10 +752,10 @@ define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
     <vscale x 4 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 4 x i1> %1,
     i32 %2)
 
@@ -780,10 +780,10 @@ define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
     <vscale x 16 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 16 x i1> %1,
     i32 %2)
 
@@ -808,10 +808,10 @@ define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 64 x i1> %1,
     i32 %2)
 
@@ -836,10 +836,10 @@ define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
     <vscale x 2 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 2 x i1> %1,
     i32 %2)
 
@@ -864,10 +864,10 @@ define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
     <vscale x 8 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 8 x i1> %1,
     i32 %2)
 
@@ -892,10 +892,10 @@ define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 3
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
     <vscale x 32 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 32 x i1> %1,
     i32 %2)
 
@@ -920,10 +920,10 @@ define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
     <vscale x 2 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 2 x i1> %1,
     i32 %2)
 
@@ -948,10 +948,10 @@ define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
     <vscale x 8 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 8 x i1> %1,
     i32 %2)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
index 4c3e9a3e4473..7e546bf87c3b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadc-rv64.ll
@@ -898,10 +898,10 @@ define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 2 x i1> %1,
     i64 %2)
 
@@ -926,10 +926,10 @@ define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 8 x i1> %1,
     i64 %2)
 
@@ -954,10 +954,10 @@ define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 32 x i1> %1,
     i64 %2)
 
@@ -982,10 +982,10 @@ define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 1 x i1> %1,
     i64 %2)
 
@@ -1010,10 +1010,10 @@ define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 4 x i1> %1,
     i64 %2)
 
@@ -1038,10 +1038,10 @@ define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 1
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 16 x i1> %1,
     i64 %2)
 
@@ -1066,10 +1066,10 @@ define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 1 x i1> %1,
     i64 %2)
 
@@ -1094,10 +1094,10 @@ define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 4 x i1> %1,
     i64 %2)
 
@@ -1122,10 +1122,10 @@ define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 1
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 16 x i1> %1,
     i64 %2)
 
@@ -1150,10 +1150,10 @@ define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
     <vscale x 2 x i64> %0,
-    i64 9,
+    i64 -9,
     <vscale x 2 x i1> %1,
     i64 %2)
 
@@ -1178,10 +1178,10 @@ define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x
 entry:
 ; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
     <vscale x 8 x i64> %0,
-    i64 9,
+    i64 -9,
     <vscale x 8 x i1> %1,
     i64 %2)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
index 59a5f9f684d3..756bba5b7786 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-rv32.ll
@@ -1457,11 +1457,11 @@ define <vscale x 1 x i8> @intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i8_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -1485,11 +1485,11 @@ define <vscale x 2 x i8> @intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i8_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vadd.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -1513,11 +1513,11 @@ define <vscale x 4 x i8> @intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i8_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vadd.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -1541,11 +1541,11 @@ define <vscale x 8 x i8> @intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i8_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vadd.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -1569,11 +1569,11 @@ define <vscale x 16 x i8> @intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 1
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i8_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vadd.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -1597,11 +1597,11 @@ define <vscale x 32 x i8> @intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 3
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i8_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vadd.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -1612,10 +1612,10 @@ define <vscale x 64 x i8> @intrinsic_vadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i
 entry:
 ; CHECK-LABEL: intrinsic_vadd_vi_nxv64i8_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.nxv64i8.i8(
     <vscale x 64 x i8> %0,
-    i8 9,
+    i8 -9,
     i32 %1)
 
   ret <vscale x 64 x i8> %a
@@ -1625,11 +1625,11 @@ define <vscale x 64 x i8> @intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 6
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv64i8_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vadd.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 64 x i1> %2,
     i32 %3)
 
@@ -1653,11 +1653,11 @@ define <vscale x 1 x i16> @intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i16_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vadd.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -1681,11 +1681,11 @@ define <vscale x 2 x i16> @intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i16_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vadd.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -1709,11 +1709,11 @@ define <vscale x 4 x i16> @intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i16_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vadd.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -1737,11 +1737,11 @@ define <vscale x 8 x i16> @intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i16_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vadd.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -1765,11 +1765,11 @@ define <vscale x 16 x i16> @intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i16_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vadd.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -1793,11 +1793,11 @@ define <vscale x 32 x i16> @intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv32i16_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vadd.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -1821,11 +1821,11 @@ define <vscale x 1 x i32> @intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv1i32_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vadd.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -1849,11 +1849,11 @@ define <vscale x 2 x i32> @intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv2i32_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vadd.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -1877,11 +1877,11 @@ define <vscale x 4 x i32> @intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv4i32_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vadd.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -1905,11 +1905,11 @@ define <vscale x 8 x i32> @intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv8i32_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vadd.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -1933,11 +1933,11 @@ define <vscale x 16 x i32> @intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale
 entry:
 ; CHECK-LABEL: intrinsic_vadd_mask_vi_nxv16i32_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vadd.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vadd.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 16 x i1> %2,
     i32 %3)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
index 22efd35fa56e..63218bfebb65 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv32.ll
@@ -665,10 +665,10 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i8 -9,
     i32 %1)
 
   ret <vscale x 2 x i1> %a
@@ -691,10 +691,10 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i8 -9,
     i32 %1)
 
   ret <vscale x 8 x i1> %a
@@ -717,10 +717,10 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i8 -9,
     i32 %1)
 
   ret <vscale x 32 x i1> %a
@@ -743,10 +743,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i1
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i16 -9,
     i32 %1)
 
   ret <vscale x 1 x i1> %a
@@ -769,10 +769,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i1
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i16 -9,
     i32 %1)
 
   ret <vscale x 4 x i1> %a
@@ -795,10 +795,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i16 -9,
     i32 %1)
 
   ret <vscale x 16 x i1> %a
@@ -821,10 +821,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i3
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i32 -9,
     i32 %1)
 
   ret <vscale x 1 x i1> %a
@@ -847,10 +847,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i3
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i32 -9,
     i32 %1)
 
   ret <vscale x 4 x i1> %a
@@ -873,10 +873,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i32 -9,
     i32 %1)
 
   ret <vscale x 16 x i1> %a

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
index 9b71d847a200..1b6c8eb93ea3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc-rv64.ll
@@ -809,10 +809,10 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i8 -9,
     i64 %1)
 
   ret <vscale x 2 x i1> %a
@@ -835,10 +835,10 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8>
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i8 -9,
     i64 %1)
 
   ret <vscale x 8 x i1> %a
@@ -861,10 +861,10 @@ define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i1.i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i8 -9,
     i64 %1)
 
   ret <vscale x 32 x i1> %a
@@ -887,10 +887,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i1
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i16 -9,
     i64 %1)
 
   ret <vscale x 1 x i1> %a
@@ -913,10 +913,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i1
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i16 -9,
     i64 %1)
 
   ret <vscale x 4 x i1> %a
@@ -939,10 +939,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i16 -9,
     i64 %1)
 
   ret <vscale x 16 x i1> %a
@@ -965,10 +965,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i3
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i1.i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i32 -9,
     i64 %1)
 
   ret <vscale x 1 x i1> %a
@@ -991,10 +991,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i3
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i1.i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i32 -9,
     i64 %1)
 
   ret <vscale x 4 x i1> %a
@@ -1017,10 +1017,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i1.i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i32 -9,
     i64 %1)
 
   ret <vscale x 16 x i1> %a
@@ -1043,10 +1043,10 @@ define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i6
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i1.i64(
     <vscale x 2 x i64> %0,
-    i64 9,
+    i64 -9,
     i64 %1)
 
   ret <vscale x 2 x i1> %a
@@ -1069,10 +1069,10 @@ define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i6
 entry:
 ; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+; CHECK:       vmadc.vi {{v[0-9]+}}, {{v[0-9]+}}, -9
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i1.i64(
     <vscale x 8 x i64> %0,
-    i64 9,
+    i64 -9,
     i64 %1)
 
   ret <vscale x 8 x i1> %a

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
index 22cd8afb1335..3dbdf047b7e7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadc.carry.in-rv32.ll
@@ -738,10 +738,10 @@ define <vscale x 2 x i1> @intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8(<vscale
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv2i1_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.carry.in.nxv2i1.i8(
     <vscale x 2 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 2 x i1> %1,
     i32 %2)
 
@@ -766,10 +766,10 @@ define <vscale x 8 x i1> @intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8(<vscale
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv8i1_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.carry.in.nxv8i1.i8(
     <vscale x 8 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 8 x i1> %1,
     i32 %2)
 
@@ -794,10 +794,10 @@ define <vscale x 32 x i1> @intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8(<vsca
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv32i1_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.carry.in.nxv32i1.i8(
     <vscale x 32 x i8> %0,
-    i8 9,
+    i8 -9,
     <vscale x 32 x i1> %1,
     i32 %2)
 
@@ -822,10 +822,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16(<vscal
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i16(
     <vscale x 1 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 1 x i1> %1,
     i32 %2)
 
@@ -850,10 +850,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16(<vscal
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i16(
     <vscale x 4 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 4 x i1> %1,
     i32 %2)
 
@@ -878,10 +878,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16(<vs
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i16(
     <vscale x 16 x i16> %0,
-    i16 9,
+    i16 -9,
     <vscale x 16 x i1> %1,
     i32 %2)
 
@@ -906,10 +906,10 @@ define <vscale x 1 x i1> @intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32(<vscal
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv1i1_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.carry.in.nxv1i1.i32(
     <vscale x 1 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 1 x i1> %1,
     i32 %2)
 
@@ -934,10 +934,10 @@ define <vscale x 4 x i1> @intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32(<vscal
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv4i1_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.carry.in.nxv4i1.i32(
     <vscale x 4 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 4 x i1> %1,
     i32 %2)
 
@@ -962,10 +962,10 @@ define <vscale x 16 x i1> @intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32(<vs
 entry:
 ; CHECK-LABEL: intrinsic_vmadc.carry.in_vim_nxv16i1_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, 9, v0
+; CHECK:       vmadc.vim {{v[0-9]+}}, {{v[0-9]+}}, -9, v0
   %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.carry.in.nxv16i1.i32(
     <vscale x 16 x i32> %0,
-    i32 9,
+    i32 -9,
     <vscale x 16 x i1> %1,
     i32 %2)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
index b5d3a539a0bb..108e4d5fae3f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrsub-rv32.ll
@@ -737,11 +737,11 @@ define <vscale x 1 x i8> @intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i8_nxv1i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -765,11 +765,11 @@ define <vscale x 2 x i8> @intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i8_nxv2i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i8> @llvm.riscv.vrsub.mask.nxv2i8.i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -793,11 +793,11 @@ define <vscale x 4 x i8> @intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i8_nxv4i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i8> @llvm.riscv.vrsub.mask.nxv4i8.i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -821,11 +821,11 @@ define <vscale x 8 x i8> @intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i8_nxv8i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i8> @llvm.riscv.vrsub.mask.nxv8i8.i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -849,11 +849,11 @@ define <vscale x 16 x i8> @intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i8_nxv16i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i8> @llvm.riscv.vrsub.mask.nxv16i8.i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -877,11 +877,11 @@ define <vscale x 32 x i8> @intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i8_nxv32i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i8> @llvm.riscv.vrsub.mask.nxv32i8.i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -905,11 +905,11 @@ define <vscale x 64 x i8> @intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv64i8_nxv64i8_i8
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 64 x i8> @llvm.riscv.vrsub.mask.nxv64i8.i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i8 9,
+    i8 -9,
     <vscale x 64 x i1> %2,
     i32 %3)
 
@@ -933,11 +933,11 @@ define <vscale x 1 x i16> @intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i16_nxv1i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i16> @llvm.riscv.vrsub.mask.nxv1i16.i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -961,11 +961,11 @@ define <vscale x 2 x i16> @intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i16_nxv2i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i16> @llvm.riscv.vrsub.mask.nxv2i16.i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -989,11 +989,11 @@ define <vscale x 4 x i16> @intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i16_nxv4i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i16> @llvm.riscv.vrsub.mask.nxv4i16.i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -1017,11 +1017,11 @@ define <vscale x 8 x i16> @intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i16_nxv8i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i16> @llvm.riscv.vrsub.mask.nxv8i16.i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -1045,11 +1045,11 @@ define <vscale x 16 x i16> @intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16(<vscal
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i16_nxv16i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i16> @llvm.riscv.vrsub.mask.nxv16i16.i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 16 x i1> %2,
     i32 %3)
 
@@ -1073,11 +1073,11 @@ define <vscale x 32 x i16> @intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16(<vscal
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv32i16_nxv32i16_i16
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 32 x i16> @llvm.riscv.vrsub.mask.nxv32i16.i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i16 9,
+    i16 -9,
     <vscale x 32 x i1> %2,
     i32 %3)
 
@@ -1101,11 +1101,11 @@ define <vscale x 1 x i32> @intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv1i32_nxv1i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 1 x i32> @llvm.riscv.vrsub.mask.nxv1i32.i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 1 x i1> %2,
     i32 %3)
 
@@ -1129,11 +1129,11 @@ define <vscale x 2 x i32> @intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv2i32_nxv2i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 2 x i32> @llvm.riscv.vrsub.mask.nxv2i32.i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 2 x i1> %2,
     i32 %3)
 
@@ -1157,11 +1157,11 @@ define <vscale x 4 x i32> @intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv4i32_nxv4i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 4 x i32> @llvm.riscv.vrsub.mask.nxv4i32.i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 4 x i1> %2,
     i32 %3)
 
@@ -1185,11 +1185,11 @@ define <vscale x 8 x i32> @intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32(<vscale x
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv8i32_nxv8i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 8 x i32> @llvm.riscv.vrsub.mask.nxv8i32.i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 8 x i1> %2,
     i32 %3)
 
@@ -1213,11 +1213,11 @@ define <vscale x 16 x i32> @intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32(<vscal
 entry:
 ; CHECK-LABEL: intrinsic_vrsub_mask_vi_nxv16i32_nxv16i32_i32
 ; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+; CHECK:       vrsub.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
   %a = call <vscale x 16 x i32> @llvm.riscv.vrsub.mask.nxv16i32.i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    i32 -9,
     <vscale x 16 x i1> %2,
     i32 %3)
 


        

