[llvm] c707716 - [RISCV] Match vmslt(u).vx intrinsics with a small immediate to vmsle(u).vx.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 5 10:21:45 PST 2021


Author: Craig Topper
Date: 2021-01-05T10:20:21-08:00
New Revision: c707716c049cd46bd89da102cf9444462487b490

URL: https://github.com/llvm/llvm-project/commit/c707716c049cd46bd89da102cf9444462487b490
DIFF: https://github.com/llvm/llvm-project/commit/c707716c049cd46bd89da102cf9444462487b490.diff

LOG: [RISCV] Match vmslt(u).vx intrinsics with a small immediate to vmsle(u).vx.

There are vmsle(u).vx and vmsle(u).vi instructions, but there is
only vmslt(u).vx and no vmslt(u).vi. For immediates in the range
-15 to 16, vmslt(u).vi can be emulated by decrementing the
immediate and using vmsle(u).vi instead.
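
For example, x < 7 holds exactly when x <= 6, so a call like the
following (an illustrative sketch; values and registers are hypothetical)

  %m = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
    <vscale x 1 x i8> %v,
    i8 7,
    i32 %vl)

can be selected as

  vmsle.vi v0, v8, 6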

So that users don't need to be aware of this gap, this patch performs
the conversion automatically during instruction selection.

The assembler does the same thing for the vmslt(u).vi and vmsge(u).vi
pseudoinstructions. There is no vmsge(u).vx intrinsic or instruction,
so this patch is limited to vmslt(u).
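
For example, the assembler accepts

  vmslt.vi v0, v8, 5    # pseudoinstruction (operands are illustrative)

and encodes it as

  vmsle.vi v0, v8, 4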

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D94070

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 87c4ca7c1253..d10c2a2c70ab 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -35,6 +35,11 @@ def NoX0 : SDNodeXForm<undef,
   return SDValue(N, 0);
 }]>;
 
+def DecImm : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
+                                   N->getValueType(0));
+}]>;
+
 //===----------------------------------------------------------------------===//
 // Utilities.
 //===----------------------------------------------------------------------===//
@@ -3080,6 +3085,71 @@ defm "" : VPatBinaryM_VV_VX_VI<"int_riscv_vmsle", "PseudoVMSLE", AllIntegerVecto
 defm "" : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
 defm "" : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
 
+// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16. This
+// avoids the user needing to know that there is no vmslt(u).vi instruction.
+// This is limited to vmslt(u).vx as there is no vmsge(u).vx intrinsic or
+// instruction.
+foreach vti = AllIntegerVectors in {
+  def : Pat<(vti.Mask (int_riscv_vmslt (vti.Vector vti.RegClass:$rs1),
+                                       (vti.Scalar simm5_plus1:$rs2), GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                               (DecImm simm5_plus1:$rs2),
+                                                               (NoX0 GPR:$vl),
+                                                               vti.SEW)>;
+  def : Pat<(vti.Mask (int_riscv_vmslt_mask (vti.Mask V0),
+                                            (vti.Vector vti.RegClass:$rs1),
+                                            (vti.Scalar simm5_plus1:$rs2),
+                                            (vti.Mask VR:$merge),
+                                            GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX#"_MASK")
+                                                      VR:$merge,
+                                                      vti.RegClass:$rs1,
+                                                      (DecImm simm5_plus1:$rs2),
+                                                      (vti.Mask V0),
+                                                      (NoX0 GPR:$vl),
+                                                      vti.SEW)>;
+
+  def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
+                                        (vti.Scalar simm5_plus1:$rs2), GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                                (DecImm simm5_plus1:$rs2),
+                                                                (NoX0 GPR:$vl),
+                                                                vti.SEW)>;
+  def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask V0),
+                                             (vti.Vector vti.RegClass:$rs1),
+                                             (vti.Scalar simm5_plus1:$rs2),
+                                             (vti.Mask VR:$merge),
+                                             GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX#"_MASK")
+                                                      VR:$merge,
+                                                      vti.RegClass:$rs1,
+                                                      (DecImm simm5_plus1:$rs2),
+                                                      (vti.Mask V0),
+                                                      (NoX0 GPR:$vl),
+                                                      vti.SEW)>;
+
+  // Special cases to avoid matching vmsltu with immediate 0 (always false)
+  // to vmsleu.vi -1 (always true). Instead match to vmsne.vv.
+  def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
+                                        (vti.Scalar 0), GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                               vti.RegClass:$rs1,
+                                                               (NoX0 GPR:$vl),
+                                                               vti.SEW)>;
+  def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask V0),
+                                             (vti.Vector vti.RegClass:$rs1),
+                                             (vti.Scalar 0),
+                                             (vti.Mask VR:$merge),
+                                            GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX#"_MASK")
+                                                     VR:$merge,
+                                                     vti.RegClass:$rs1,
+                                                     vti.RegClass:$rs1,
+                                                     (vti.Mask V0),
+                                                     (NoX0 GPR:$vl),
+                                                     vti.SEW)>;
+}
+
 //===----------------------------------------------------------------------===//
 // 12.9. Vector Integer Min/Max Instructions
 //===----------------------------------------------------------------------===//

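A note on the special cases above: the decrement trick is wrong at the
bottom of the unsigned range. Sketching the reasoning (registers are
illustrative):

  vmsltu.vx v0, v8, zero   # x <u 0 is always false
  # Decrementing 0 would produce vmsleu.vi v0, v8, -1, i.e. x <=u the
  # maximum unsigned value, which is always true. The patterns instead emit:
  vmsne.vv v0, v8, v8      # x != x is always false
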
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
index 997feac5131f..18a18acc6484 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll
@@ -1259,3 +1259,423 @@ entry:
 
   ret <vscale x 8 x i1> %a
 }
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -16
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 -15,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i8> %1,
+    i8 -14,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -14
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 -13,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i8> %1,
+    i8 -12,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -12
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 -11,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i8> %1,
+    i8 -10,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -10
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 -9,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i8> %1,
+    i8 -8,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -8
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 -7,
+    i32 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i8> %1,
+    i8 -6,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -6
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 -5,
+    i32 %1)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i8> %1,
+    i8 -4,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -4
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 -3,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i16> %1,
+    i16 -2,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -2
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 -1,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i16> %1,
+    i16 0,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 0,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i16> %1,
+    i16 1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 1
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 2,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i16> %1,
+    i16 3,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 3
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 4,
+    i32 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i16> %1,
+    i16 5,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 5
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 6,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i32> %1,
+    i32 7,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 7
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 8,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 10,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i32> %1,
+    i32 11,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 11
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 12,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i32> %1,
+    i32 13,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
index da9797716842..1bbad3af2036 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll
@@ -1511,3 +1511,507 @@ entry:
 
   ret <vscale x 4 x i1> %a
 }
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -16
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 -15,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i8> %1,
+    i8 -14,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -14
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 -13,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i8> %1,
+    i8 -12,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -12
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 -11,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i8> %1,
+    i8 -10,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -10
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 -9,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i8> %1,
+    i8 -8,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -8
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 -7,
+    i64 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i8> %1,
+    i8 -6,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -6
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 -5,
+    i64 %1)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i8> %1,
+    i8 -4,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -4
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 -3,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i16> %1,
+    i16 -2,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -2
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 -1,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i16> %1,
+    i16 0,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 0,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i16> %1,
+    i16 1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 1
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 2,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i16> %1,
+    i16 3,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 3
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 4,
+    i64 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i16> %1,
+    i16 5,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 5
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 6,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i32> %1,
+    i32 7,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 7
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 8,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 10,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i32> %1,
+    i32 11,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 11
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 12,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i32> %1,
+    i32 13,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 13
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 14,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 14, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i64> %1,
+    i64 15,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 15
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 16,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -16, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i64> %1,
+    i64 -15,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -15
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 -14,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
+; CHECK:       vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -14, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i64> %1,
+    i64 -13,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
index 797188edbb2f..3c1f41625b47 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
@@ -1259,3 +1259,423 @@ entry:
 
   ret <vscale x 8 x i1> %a
 }
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -16
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 -15,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i8> %1,
+    i8 -14,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -14
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 -13,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i8> %1,
+    i8 -12,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -12
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 -11,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i8> %1,
+    i8 -10,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -10
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 -9,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i8> %1,
+    i8 -8,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -8
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 -7,
+    i32 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i8> %1,
+    i8 -6,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -6
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 -5,
+    i32 %1)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i8> %1,
+    i8 -4,
+    <vscale x 32 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -4
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 -3,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i16> %1,
+    i16 -2,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -2
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 -1,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK:       vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]], v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i16> %1,
+    i16 0,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]]
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 0,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i16> %1,
+    i16 1,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 1
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 2,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i16> %1,
+    i16 3,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 3
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 4,
+    i32 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i16> %1,
+    i16 5,
+    <vscale x 16 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 5
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 6,
+    i32 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i32> %1,
+    i32 7,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 7
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 8,
+    i32 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 10,
+    i32 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i32> %1,
+    i32 11,
+    <vscale x 4 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 11
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 12,
+    i32 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i32> %1,
+    i32 13,
+    <vscale x 8 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 8 x i1> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
index 462173e83329..ec6007771754 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
@@ -1511,3 +1511,507 @@ entry:
 
   ret <vscale x 4 x i1> %a
 }
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -16
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 -15,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i8> %1,
+    i8 -14,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -14
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 -13,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i8> %1,
+    i8 -12,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -12
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 -11,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i8> %1,
+    i8 -10,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -10
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 -9,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i8> %1,
+    i8 -8,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -8
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 -7,
+    i64 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i8> %1,
+    i8 -6,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -6
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 -5,
+    i64 %1)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i8> %1,
+    i8 -4,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -4
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 -3,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i16> %1,
+    i16 -2,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -2
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 -1,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK:       vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]], v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i16> %1,
+    i16 0,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK:       vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]]
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 0,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i16> %1,
+    i16 1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 1
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 2,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i16> %1,
+    i16 3,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 3
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 4,
+    i64 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i16> %1,
+    i16 5,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 5
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 6,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i32> %1,
+    i32 7,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 7
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 8,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 10,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i32> %1,
+    i32 11,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 11
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 12,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i32> %1,
+    i32 13,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 13
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 14,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 14, v0.t
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i64> %1,
+    i64 15,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 15
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 16,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -16, v0.t
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i64> %1,
+    i64 -15,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -15
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 -14,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64
+; CHECK:       vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
+; CHECK:       vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -14, v0.t
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i64> %1,
+    i64 -13,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
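
One wrinkle in the unsigned tests above is the immediate-0 case (intrinsic_vmsltu_vi_nxv4i16_i16 and intrinsic_vmsltu_mask_vi_nxv2i16_i16): decrementing 0 would give vmsleu.vi with an immediate of -1, which sign-extends to the all-ones value and is therefore always true, while an unsigned less-than-zero comparison is always false. The CHECK lines show the lowering instead selects vmsne.vv with the same register for both source operands, which produces an all-false mask. A minimal sketch of such an input, assuming the intrinsic signature used throughout these tests (the function name is illustrative, not from the patch):

declare <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(<vscale x 4 x i16>, i16, i64)

define <vscale x 4 x i1> @vmsltu_always_false_sketch(<vscale x 4 x i16> %v, i64 %vl) nounwind {
entry:
; Expected lowering, per the CHECK lines above:
;   vmsne.vv vd, vs, vs   ; x != x is false in every lane, so vd is all zeros
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
    <vscale x 4 x i16> %v,
    i16 0,
    i64 %vl)
  ret <vscale x 4 x i1> %a
}

The masked form at immediate 0 follows the same scheme, with v0.t preserving the merge semantics of the masked intrinsic.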
