[llvm-branch-commits] [llvm] 4b07c51 - [RISCV] Define vlse/vsse intrinsics.

Zakk Chen via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Thu Dec 17 17:05:00 PST 2020


Author: Zakk Chen
Date: 2020-12-17T17:00:01-08:00
New Revision: 4b07c515ef407786a5c2ebc9f7f9d2638eeaf8cf

URL: https://github.com/llvm/llvm-project/commit/4b07c515ef407786a5c2ebc9f7f9d2638eeaf8cf
DIFF: https://github.com/llvm/llvm-project/commit/4b07c515ef407786a5c2ebc9f7f9d2638eeaf8cf.diff

LOG: [RISCV] Define vlse/vsse intrinsics.

Define vlse/vsse intrinsics and lower to V instructions.
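
As a minimal usage sketch (mirroring the vlse-rv32.ll test added below; the
wrapper function name is illustrative), the strided load intrinsic takes a
pointer, a byte stride, and a vector length:

  declare <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
    <vscale x 1 x i32>*, i32, i32)

  define <vscale x 1 x i32> @strided_load(<vscale x 1 x i32>* %ptr, i32 %stride, i32 %vl) {
    ; Loads %vl i32 elements, each %stride bytes apart, starting at %ptr.
    %v = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
      <vscale x 1 x i32>* %ptr, i32 %stride, i32 %vl)
    ret <vscale x 1 x i32> %v
  }

This lowers to a vsetvli followed by vlse32.v. The masked variants additionally
take a maskedoff operand and a mask, and the vsse store intrinsics use the same
operand order with the vector to store first.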

We worked with @rogfer01 from BSC to develop this patch.

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen at sifive.com>

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D93445

Added: 
    llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll

Modified: 
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index f5c933c51706..805035f9244e 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -95,6 +95,21 @@ let TargetPrefix = "riscv" in {
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For strided load
+  // Input: (pointer, stride, vl)
+  class RISCVSLoad
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMPointerType<LLVMMatchType<0>>,
+                     llvm_anyint_ty, LLVMMatchType<1>],
+                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For strided load with mask
+  // Input: (maskedoff, pointer, stride, mask, vl)
+  class RISCVSLoadMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>,
+                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
+                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
   // For unit stride store
   // Input: (vector_in, pointer, vl)
   class RISCVUSStore
@@ -112,6 +127,22 @@ let TargetPrefix = "riscv" in {
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                      llvm_anyint_ty],
                     [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For strided store
+  // Input: (vector_in, pointer, stride, vl)
+  class RISCVSStore
+        : Intrinsic<[],
+                    [llvm_anyvector_ty,
+                     LLVMPointerType<LLVMMatchType<0>>,
+                     llvm_anyint_ty, LLVMMatchType<1>],
+                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For strided store with mask
+  // Input: (vector_in, pointer, stride, mask, vl)
+  class RISCVSStoreMask
+        : Intrinsic<[],
+                    [llvm_anyvector_ty,
+                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
+                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
   // For destination vector type is the same as first source vector.
   // Input: (vector_in, vector_in/scalar_in, vl)
   class RISCVBinaryAAXNoMask
@@ -182,10 +213,18 @@ let TargetPrefix = "riscv" in {
     def "int_riscv_" # NAME : RISCVUSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
   }
+  multiclass RISCVSLoad {
+    def "int_riscv_" # NAME : RISCVSLoad;
+    def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
+  }
   multiclass RISCVUSStore {
     def "int_riscv_" # NAME : RISCVUSStore;
     def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
   }
+  multiclass RISCVSStore {
+    def "int_riscv_" # NAME : RISCVSStore;
+    def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
+  }
 
   multiclass RISCVBinaryAAX {
     def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
@@ -207,6 +246,8 @@ let TargetPrefix = "riscv" in {
 
   defm vle : RISCVUSLoad;
   defm vse : RISCVUSStore;
+  defm vlse : RISCVSLoad;
+  defm vsse : RISCVSStore;
 
   defm vadd : RISCVBinaryAAX;
   defm vsub : RISCVBinaryAAX;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 4d31f1217a3b..300d0b23c0d1 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -310,6 +310,39 @@ class VPseudoUSLoadMask<VReg RetClass>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoSLoadNoMask<VReg RetClass>:
+      Pseudo<(outs RetClass:$rd),
+             (ins GPR:$rs1, GPR:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoSLoadMask<VReg RetClass>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+              (ins GetVRegNoV0<RetClass>.R:$merge,
+                   GPR:$rs1, GPR:$rs2,
+                   VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let VLIndex = 5;
+  let SEWIndex = 6;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoUSStoreNoMask<VReg StClass>:
       Pseudo<(outs),
               (ins StClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
@@ -339,6 +372,35 @@ class VPseudoUSStoreMask<VReg StClass>:
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
+class VPseudoSStoreNoMask<VReg StClass>:
+      Pseudo<(outs),
+              (ins StClass:$rd, GPR:$rs1, GPR:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoSStoreMask<VReg StClass>:
+      Pseudo<(outs),
+              (ins StClass:$rd, GPR:$rs1, GPR:$rs2, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoBinaryNoMask<VReg RetClass,
                           VReg Op1Class,
                           DAGOperand Op2Class,
@@ -415,6 +477,17 @@ multiclass VPseudoUSLoad {
   }
 }
 
+multiclass VPseudoSLoad {
+  foreach lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # LInfo : VPseudoSLoadNoMask<vreg>;
+      def "_V_" # LInfo # "_MASK" : VPseudoSLoadMask<vreg>;
+    }
+  }
+}
+
 multiclass VPseudoUSStore {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -426,6 +499,17 @@ multiclass VPseudoUSStore {
   }
 }
 
+multiclass VPseudoSStore {
+  foreach lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar vreg = lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # LInfo : VPseudoSStoreNoMask<vreg>;
+      def "_V_" # LInfo # "_MASK" : VPseudoSStoreMask<vreg>;
+    }
+  }
+}
+
 multiclass VPseudoBinary<VReg RetClass,
                          VReg Op1Class,
                          DAGOperand Op2Class,
@@ -720,6 +804,26 @@ multiclass VPatUSLoad<string intrinsic,
                                 $rs1, (mask_type V0), (NoX0 GPR:$vl), sew)>;
 }
 
+multiclass VPatSLoad<string intrinsic,
+                     string inst,
+                     LLVMType type,
+                     LLVMType mask_type,
+                     int sew,
+                     LMULInfo vlmul,
+                     VReg reg_class>
+{
+    defvar Intr = !cast<Intrinsic>(intrinsic);
+    defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
+    def : Pat<(type (Intr GPR:$rs1, GPR:$rs2, GPR:$vl)),
+                    (Pseudo $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
+    defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
+    def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
+                               GPR:$rs1, GPR:$rs2, (mask_type V0), GPR:$vl)),
+                    (PseudoMask $merge,
+                                $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
 multiclass VPatUSStore<string intrinsic,
                        string inst,
                        LLVMType type,
@@ -738,6 +842,24 @@ multiclass VPatUSStore<string intrinsic,
               (PseudoMask $rs3, $rs1, (mask_type V0), (NoX0 GPR:$vl), sew)>;
 }
 
+multiclass VPatSStore<string intrinsic,
+                      string inst,
+                      LLVMType type,
+                      LLVMType mask_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      VReg reg_class>
+{
+    defvar Intr = !cast<Intrinsic>(intrinsic);
+    defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
+    def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, GPR:$vl),
+                    (Pseudo $rs3, $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+    defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
+    defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
+    def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, GPR:$rs2, (mask_type V0), GPR:$vl),
+              (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
 multiclass VPatBinary<string intrinsic,
                       string inst,
                       string kind,
@@ -1081,6 +1203,16 @@ foreach eew = EEWList in {
   defm PseudoVSE # eew : VPseudoUSStore;
 }
 
+//===----------------------------------------------------------------------===//
+// 7.5 Vector Strided Instructions
+//===----------------------------------------------------------------------===//
+
+// Vector Strided Loads and Stores
+foreach eew = EEWList in {
+  defm PseudoVLSE # eew : VPseudoSLoad;
+  defm PseudoVSSE # eew : VPseudoSStore;
+}
+
 //===----------------------------------------------------------------------===//
 // Pseudo Instructions
 //===----------------------------------------------------------------------===//
@@ -1207,6 +1339,20 @@ foreach vti = AllVectors in
                      vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
 }
 
+//===----------------------------------------------------------------------===//
+// 7.5 Vector Strided Instructions
+//===----------------------------------------------------------------------===//
+
+foreach vti = AllVectors in
+{
+  defm : VPatSLoad<"int_riscv_vlse",
+                   "PseudoVLSE" # vti.SEW,
+                   vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
+  defm : VPatSStore<"int_riscv_vsse",
+                    "PseudoVSSE" # vti.SEW,
+                    vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
+}
+
 //===----------------------------------------------------------------------===//
 // 12. Vector Integer Arithmetic Instructions
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
new file mode 100644
index 000000000000..9c1032d53243
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
@@ -0,0 +1,1161 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
+  <vscale x 1 x i32>*,
+  i32,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
+    <vscale x 1 x i32>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
+  <vscale x 2 x i32>*,
+  i32,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vlse_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
+    <vscale x 2 x i32>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
+  <vscale x 4 x i32>*,
+  i32,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vlse_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
+    <vscale x 4 x i32>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
+  <vscale x 8 x i32>*,
+  i32,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vlse_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
+    <vscale x 8 x i32>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
+  <vscale x 16 x i32>*,
+  i32,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vlse_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
+    <vscale x 16 x i32>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>*,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32>* %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
+  <vscale x 1 x float>*,
+  i32,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vlse_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
+    <vscale x 1 x float>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
+  <vscale x 2 x float>*,
+  i32,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vlse_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
+    <vscale x 2 x float>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
+  <vscale x 4 x float>*,
+  i32,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vlse_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
+    <vscale x 4 x float>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
+  <vscale x 8 x float>*,
+  i32,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vlse_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
+    <vscale x 8 x float>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
+  <vscale x 16 x float>*,
+  i32,
+  i32);
+
+define <vscale x 16 x float> @intrinsic_vlse_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
+    <vscale x 16 x float>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
+  <vscale x 1 x i16>*,
+  i32,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vlse_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
+    <vscale x 1 x i16>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
+  <vscale x 2 x i16>*,
+  i32,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vlse_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
+    <vscale x 2 x i16>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
+  <vscale x 4 x i16>*,
+  i32,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vlse_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
+    <vscale x 4 x i16>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
+  <vscale x 8 x i16>*,
+  i32,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vlse_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
+    <vscale x 8 x i16>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
+  <vscale x 16 x i16>*,
+  i32,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vlse_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
+    <vscale x 16 x i16>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
+  <vscale x 32 x i16>*,
+  i32,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
+    <vscale x 32 x i16>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  i32,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    i32 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
+  <vscale x 1 x half>*,
+  i32,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vlse_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
+    <vscale x 1 x half>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
+  <vscale x 2 x half>*,
+  i32,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vlse_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
+    <vscale x 2 x half>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
+  <vscale x 4 x half>*,
+  i32,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vlse_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
+    <vscale x 4 x half>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
+  <vscale x 8 x half>*,
+  i32,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vlse_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
+    <vscale x 8 x half>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
+  <vscale x 16 x half>*,
+  i32,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vlse_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
+    <vscale x 16 x half>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
+  <vscale x 32 x half>*,
+  i32,
+  i32);
+
+define <vscale x 32 x half> @intrinsic_vlse_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
+    <vscale x 32 x half>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  i32,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half>* %1,
+    i32 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
+  <vscale x 1 x i8>*,
+  i32,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf8,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
+    <vscale x 1 x i8>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf8,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
+  <vscale x 2 x i8>*,
+  i32,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf4,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
+    <vscale x 2 x i8>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf4,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
+  <vscale x 4 x i8>*,
+  i32,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf2,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
+    <vscale x 4 x i8>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf2,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
+  <vscale x 8 x i8>*,
+  i32,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m1,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
+    <vscale x 8 x i8>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m1,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
+  <vscale x 16 x i8>*,
+  i32,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m2,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
+    <vscale x 16 x i8>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m2,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
+  <vscale x 32 x i8>*,
+  i32,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m4,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
+    <vscale x 32 x i8>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  i32,
+  <vscale x 32 x i1>,
+  i32);
+
+define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m4,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    i32 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
+  <vscale x 64 x i8>*,
+  i32,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m8,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
+    <vscale x 64 x i8>* %0,
+    i32 %1,
+    i32 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>*,
+  i32,
+  <vscale x 64 x i1>,
+  i32);
+
+define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m8,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8>* %1,
+    i32 %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 64 x i8> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
new file mode 100644
index 000000000000..87b4c5f97914
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
@@ -0,0 +1,1481 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
+  <vscale x 1 x i64>*,
+  i64,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vlse_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m1,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
+    <vscale x 1 x i64>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m1,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
+  <vscale x 2 x i64>*,
+  i64,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vlse_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m2,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
+    <vscale x 2 x i64>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m2,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
+  <vscale x 4 x i64>*,
+  i64,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vlse_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m4,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
+    <vscale x 4 x i64>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m4,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
+  <vscale x 8 x i64>*,
+  i64,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vlse_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m8,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
+    <vscale x 8 x i64>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m8,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
+  <vscale x 1 x double>*,
+  i64,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vlse_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m1,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
+    <vscale x 1 x double>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m1,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
+  <vscale x 2 x double>*,
+  i64,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vlse_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m2,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
+    <vscale x 2 x double>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m2,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
+  <vscale x 4 x double>*,
+  i64,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vlse_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m4,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
+    <vscale x 4 x double>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m4,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
+  <vscale x 8 x double>*,
+  i64,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vlse_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m8,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
+    <vscale x 8 x double>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m8,ta,mu
+; CHECK:       vlse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
+  <vscale x 1 x i32>*,
+  i64,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
+    <vscale x 1 x i32>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
+  <vscale x 2 x i32>*,
+  i64,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vlse_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
+    <vscale x 2 x i32>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
+  <vscale x 4 x i32>*,
+  i64,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vlse_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
+    <vscale x 4 x i32>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
+  <vscale x 8 x i32>*,
+  i64,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vlse_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
+    <vscale x 8 x i32>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
+  <vscale x 16 x i32>*,
+  i64,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vlse_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
+    <vscale x 16 x i32>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>*,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32>* %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
+  <vscale x 1 x float>*,
+  i64,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vlse_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
+    <vscale x 1 x float>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
+  <vscale x 2 x float>*,
+  i64,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vlse_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
+    <vscale x 2 x float>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
+  <vscale x 4 x float>*,
+  i64,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vlse_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
+    <vscale x 4 x float>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
+  <vscale x 8 x float>*,
+  i64,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vlse_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
+    <vscale x 8 x float>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
+  <vscale x 16 x float>*,
+  i64,
+  i64);
+
+define <vscale x 16 x float> @intrinsic_vlse_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
+    <vscale x 16 x float>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vlse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
+  <vscale x 1 x i16>*,
+  i64,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vlse_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
+    <vscale x 1 x i16>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
+  <vscale x 2 x i16>*,
+  i64,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vlse_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
+    <vscale x 2 x i16>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
+  <vscale x 4 x i16>*,
+  i64,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vlse_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
+    <vscale x 4 x i16>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
+  <vscale x 8 x i16>*,
+  i64,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vlse_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
+    <vscale x 8 x i16>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
+  <vscale x 16 x i16>*,
+  i64,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vlse_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
+    <vscale x 16 x i16>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
+  <vscale x 32 x i16>*,
+  i64,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
+    <vscale x 32 x i16>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  i64,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    i64 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
+  <vscale x 1 x half>*,
+  i64,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vlse_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
+    <vscale x 1 x half>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
+  <vscale x 2 x half>*,
+  i64,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vlse_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
+    <vscale x 2 x half>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
+  <vscale x 4 x half>*,
+  i64,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vlse_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
+    <vscale x 4 x half>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
+  <vscale x 8 x half>*,
+  i64,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vlse_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
+    <vscale x 8 x half>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
+  <vscale x 16 x half>*,
+  i64,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vlse_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
+    <vscale x 16 x half>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
+  <vscale x 32 x half>*,
+  i64,
+  i64);
+
+define <vscale x 32 x half> @intrinsic_vlse_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
+    <vscale x 32 x half>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  i64,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vlse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half>* %1,
+    i64 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
+  <vscale x 1 x i8>*,
+  i64,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf8,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
+    <vscale x 1 x i8>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf8,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
+  <vscale x 2 x i8>*,
+  i64,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf4,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
+    <vscale x 2 x i8>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf4,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
+  <vscale x 4 x i8>*,
+  i64,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf2,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
+    <vscale x 4 x i8>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf2,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
+  <vscale x 8 x i8>*,
+  i64,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m1,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
+    <vscale x 8 x i8>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m1,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
+  <vscale x 16 x i8>*,
+  i64,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m2,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
+    <vscale x 16 x i8>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m2,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
+  <vscale x 32 x i8>*,
+  i64,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m4,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
+    <vscale x 32 x i8>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  i64,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m4,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    i64 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
+  <vscale x 64 x i8>*,
+  i64,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m8,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1
+  %a = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
+    <vscale x 64 x i8>* %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>*,
+  i64,
+  <vscale x 64 x i1>,
+  i64);
+
+define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m8,ta,mu
+; CHECK:       vlse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8>* %1,
+    i64 %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 64 x i8> %a
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
new file mode 100644
index 000000000000..d5712f1228d4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
@@ -0,0 +1,1219 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsse.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>*,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32>* %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  i32,
+  <vscale x 32 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    i32 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  i32,
+  <vscale x 32 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half>* %1,
+    i32 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf8,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  i32,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf8,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    i32 %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf4,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  i32,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf4,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    i32 %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf2,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  i32,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf2,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    i32 %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m1,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  i32,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m1,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    i32 %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m2,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  i32,
+  <vscale x 16 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m2,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    i32 %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m4,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  i32,
+  <vscale x 32 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m4,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    i32 %2,
+    <vscale x 32 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>*,
+  i32,
+  i32);
+
+define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i32 %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m8,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8>* %1,
+    i32 %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>*,
+  i32,
+  <vscale x 64 x i1>,
+  i32);
+
+define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m8,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8>* %1,
+    i32 %2,
+    <vscale x 64 x i1> %3,
+    i32 %4)
+
+  ret void
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
new file mode 100644
index 000000000000..ad24148a94e8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
@@ -0,0 +1,1555 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsse.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m1,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m1,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m2,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m2,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m4,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m4,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m8,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64
+; CHECK:       vsetvli {{.*}}, a2, e64,m8,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m1,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1f64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m1,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m2,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2f64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m2,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m4,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4f64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m4,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m8,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8f64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64
+; CHECK:       vsetvli {{.*}}, a2, e64,m8,ta,mu
+; CHECK:       vsse64.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16i32(
+  <vscale x 16 x i32>,
+  <vscale x 16 x i32>*,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv16i32(
+    <vscale x 16 x i32> %0,
+    <vscale x 16 x i32>* %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1f32(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32
+; CHECK:       vsetvli {{.*}}, a2, e32,mf2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2f32(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m1,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4f32(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m2,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8f32(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m4,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16f32(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32
+; CHECK:       vsetvli {{.*}}, a2, e32,m8,ta,mu
+; CHECK:       vsse32.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>*,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16>* %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32i16(
+  <vscale x 32 x i16>,
+  <vscale x 32 x i16>*,
+  i64,
+  <vscale x 32 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv32i16(
+    <vscale x 32 x i16> %0,
+    <vscale x 32 x i16>* %1,
+    i64 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1f16(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2f16(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16
+; CHECK:       vsetvli {{.*}}, a2, e16,mf2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4f16(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m1,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8f16(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m2,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16f16(
+  <vscale x 16 x half>,
+  <vscale x 16 x half>*,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m4,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half>* %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32f16(
+  <vscale x 32 x half>,
+  <vscale x 32 x half>*,
+  i64,
+  <vscale x 32 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16
+; CHECK:       vsetvli {{.*}}, a2, e16,m8,ta,mu
+; CHECK:       vsse16.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half>* %1,
+    i64 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf8,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  i64,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf8,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    i64 %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf4,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf4,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf2,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8
+; CHECK:       vsetvli {{.*}}, a2, e8,mf2,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m1,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  i64,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m1,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    i64 %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m2,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>*,
+  i64,
+  <vscale x 16 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m2,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8>* %1,
+    i64 %2,
+    <vscale x 16 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m4,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>*,
+  i64,
+  <vscale x 32 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m4,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8>* %1,
+    i64 %2,
+    <vscale x 32 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>*,
+  i64,
+  i64);
+
+define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i64 %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m8,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1
+  call void @llvm.riscv.vsse.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8>* %1,
+    i64 %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsse.mask.nxv64i8(
+  <vscale x 64 x i8>,
+  <vscale x 64 x i8>*,
+  i64,
+  <vscale x 64 x i1>,
+  i64);
+
+define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8
+; CHECK:       vsetvli {{.*}}, a2, e8,m8,ta,mu
+; CHECK:       vsse8.v {{v[0-9]+}}, (a0), a1, v0.t
+  call void @llvm.riscv.vsse.mask.nxv64i8(
+    <vscale x 64 x i8> %0,
+    <vscale x 64 x i8>* %1,
+    i64 %2,
+    <vscale x 64 x i1> %3,
+    i64 %4)
+
+  ret void
+}
