[llvm-branch-commits] [llvm] c1d6d46 - [RISCV] Define vle/vse intrinsics.
Zakk Chen via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Wed Dec 16 18:12:36 PST 2020
Author: Zakk Chen
Date: 2020-12-16T18:08:15-08:00
New Revision: c1d6d461aa77921d7ce761e2966e6bc1f3eee2db
URL: https://github.com/llvm/llvm-project/commit/c1d6d461aa77921d7ce761e2966e6bc1f3eee2db
DIFF: https://github.com/llvm/llvm-project/commit/c1d6d461aa77921d7ce761e2966e6bc1f3eee2db.diff
LOG: [RISCV] Define vle/vse intrinsics.
Define vle/vse intrinsics and lower them to V instructions.
This patch was developed in collaboration with @rogfer01 from BSC.
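For illustration, a minimal IR sketch of how the unmasked forms are intended to be used, based on the signatures and tests added in this patch; the function @copy_nxv1i32 is hypothetical, and the vse name mangling is assumed to mirror the vle tests:

  ; unit-stride load: (pointer, vl) -> vector
  declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(<vscale x 1 x i32>*, i32)
  ; unit-stride store: (vector_in, pointer, vl); mangling assumed to mirror vle
  declare void @llvm.riscv.vse.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>*, i32)

  define void @copy_nxv1i32(<vscale x 1 x i32>* %src, <vscale x 1 x i32>* %dst, i32 %vl) nounwind {
    %v = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(<vscale x 1 x i32>* %src, i32 %vl)
    call void @llvm.riscv.vse.nxv1i32(<vscale x 1 x i32> %v, <vscale x 1 x i32>* %dst, i32 %vl)
    ret void
  }

With -mattr=+experimental-v this is expected to select a vsetvli followed by vle32.v/vse32.v, as exercised by the new tests below.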
Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen at sifive.com>
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D93359
Added:
llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index b5436be70f1a..30d15347206c 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -79,6 +79,37 @@ class RISCVVIntrinsic {
}
let TargetPrefix = "riscv" in {
+ // For unit stride load
+ // Input: (pointer, vl)
+ class RISCVUSLoad
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMPointerType<LLVMMatchType<0>>,
+ llvm_anyint_ty],
+ [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+ // For unit stride load with mask
+ // Input: (maskedoff, pointer, mask, vl)
+ class RISCVUSLoadMask
+ : Intrinsic<[llvm_anyvector_ty ],
+ [LLVMMatchType<0>,
+ LLVMPointerType<LLVMMatchType<0>>,
+ llvm_anyvector_ty, llvm_anyint_ty],
+ [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
+ // For unit stride store
+ // Input: (vector_in, pointer, vl)
+ class RISCVUSStore
+ : Intrinsic<[],
+ [llvm_anyvector_ty,
+ LLVMPointerType<LLVMMatchType<0>>,
+ llvm_anyint_ty],
+ [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+ // For unit stride store with mask
+ // Input: (vector_in, pointer, mask, vl)
+ class RISCVUSStoreMask
+ : Intrinsic<[],
+ [llvm_anyvector_ty,
+ LLVMPointerType<LLVMMatchType<0>>,
+ llvm_anyvector_ty, llvm_anyint_ty],
+ [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
// For destination vector type is the same as first source vector.
// Input: (vector_in, vector_in/scalar_in, vl)
class RISCVBinaryAAXNoMask
@@ -87,6 +118,7 @@ let TargetPrefix = "riscv" in {
[IntrNoMem]>, RISCVVIntrinsic {
let ExtendOperand = 2;
}
+
// For destination vector type is the same as first source vector (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
class RISCVBinaryAAXMask
@@ -142,6 +174,15 @@ let TargetPrefix = "riscv" in {
let ExtendOperand = 2;
}
+ multiclass RISCVUSLoad {
+ def "int_riscv_" # NAME : RISCVUSLoad;
+ def "int_riscv_" # NAME # "_mask" : RISCVUSLoadMask;
+ }
+ multiclass RISCVUSStore {
+ def "int_riscv_" # NAME : RISCVUSStore;
+ def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
+ }
+
multiclass RISCVBinaryAAX {
def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
@@ -160,6 +201,9 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVBinaryMOut;
}
+ defm vle : RISCVUSLoad;
+ defm vse : RISCVUSStore;
+
defm vadd : RISCVBinaryAAX;
defm vsub : RISCVBinaryAAX;
defm vrsub : RISCVBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 25fd7435affd..d3282a953652 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -700,6 +700,44 @@ class VPatBinaryMask<string intrinsic_name,
ToFPR32<op2_type, op2_kind, "rs2">.ret,
(mask_type V0), (NoX0 GPR:$vl), sew)>;
+multiclass VPatUSLoad<string intrinsic,
+ string inst,
+ LLVMType type,
+ LLVMType mask_type,
+ int sew,
+ LMULInfo vlmul,
+ VReg reg_class>
+{
+ defvar Intr = !cast<Intrinsic>(intrinsic);
+ defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
+ def : Pat<(type (Intr GPR:$rs1, GPR:$vl)),
+ (Pseudo $rs1, (NoX0 GPR:$vl), sew)>;
+ defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
+ defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
+ def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
+ GPR:$rs1, (mask_type V0), GPR:$vl)),
+ (PseudoMask $merge,
+ $rs1, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
+multiclass VPatUSStore<string intrinsic,
+ string inst,
+ LLVMType type,
+ LLVMType mask_type,
+ int sew,
+ LMULInfo vlmul,
+ VReg reg_class>
+{
+ defvar Intr = !cast<Intrinsic>(intrinsic);
+ defvar Pseudo = !cast<Instruction>(inst#"_V_"#vlmul.MX);
+ def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1, GPR:$vl),
+ (Pseudo $rs3, $rs1, (NoX0 GPR:$vl), sew)>;
+ defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
+ defvar PseudoMask = !cast<Instruction>(inst#"_V_"#vlmul.MX#"_MASK");
+ def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1, (mask_type V0), GPR:$vl),
+ (PseudoMask $rs3, $rs1, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
multiclass VPatBinary<string intrinsic,
string inst,
string kind,
@@ -1033,6 +1071,10 @@ def PseudoVSETVLI : Pseudo<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei), []>
// 7. Vector Loads and Stores
//===----------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
+// 7.4 Vector Unit-Stride Instructions
+//===----------------------------------------------------------------------===//
+
// Pseudos Unit-Stride Loads and Stores
foreach eew = EEWList in {
defm PseudoVLE # eew : VPseudoUSLoad;
@@ -1124,6 +1166,24 @@ defm "" : VPatUSLoadStoreSDNodes<AddrFI>;
// 12.1. Vector Single-Width Integer Add and Subtract
defm "" : VPatBinarySDNode<add, "PseudoVADD">;
+//===----------------------------------------------------------------------===//
+// 7. Vector Loads and Stores
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// 7.4 Vector Unit-Stride Instructions
+//===----------------------------------------------------------------------===//
+
+foreach vti = AllVectors in
+{
+ defm : VPatUSLoad<"int_riscv_vle",
+ "PseudoVLE" # vti.SEW,
+ vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
+ defm : VPatUSStore<"int_riscv_vse",
+ "PseudoVSE" # vti.SEW,
+ vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
+}
+
//===----------------------------------------------------------------------===//
// 12. Vector Integer Arithmetic Instructions
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
new file mode 100644
index 000000000000..be01dc42df64
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
@@ -0,0 +1,1045 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+ <vscale x 1 x i32>*,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+ <vscale x 1 x i32>* %0,
+ i32 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
+ <vscale x 2 x i32>*,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
+ <vscale x 2 x i32>* %0,
+ i32 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
+ <vscale x 4 x i32>*,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
+ <vscale x 4 x i32>* %0,
+ i32 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
+ <vscale x 8 x i32>*,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
+ <vscale x 8 x i32>* %0,
+ i32 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
+ <vscale x 16 x i32>*,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
+ <vscale x 16 x i32>* %0,
+ i32 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
+ <vscale x 1 x float>*,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
+ <vscale x 1 x float>* %0,
+ i32 %1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
+ <vscale x 2 x float>*,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
+ <vscale x 2 x float>* %0,
+ i32 %1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
+ <vscale x 4 x float>*,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
+ <vscale x 4 x float>* %0,
+ i32 %1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
+ <vscale x 8 x float>*,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
+ <vscale x 8 x float>* %0,
+ i32 %1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
+ <vscale x 16 x float>*,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
+ <vscale x 16 x float>* %0,
+ i32 %1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
+ <vscale x 1 x i16>*,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
+ <vscale x 1 x i16>* %0,
+ i32 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
+ <vscale x 2 x i16>*,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
+ <vscale x 2 x i16>* %0,
+ i32 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
+ <vscale x 4 x i16>*,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
+ <vscale x 4 x i16>* %0,
+ i32 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
+ <vscale x 8 x i16>*,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
+ <vscale x 8 x i16>* %0,
+ i32 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
+ <vscale x 16 x i16>*,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
+ <vscale x 16 x i16>* %0,
+ i32 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
+ <vscale x 32 x i16>*,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
+ <vscale x 32 x i16>* %0,
+ i32 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
+ <vscale x 1 x half>*,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
+ <vscale x 1 x half>* %0,
+ i32 %1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
+ <vscale x 2 x half>*,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
+ <vscale x 2 x half>* %0,
+ i32 %1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
+ <vscale x 4 x half>*,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
+ <vscale x 4 x half>* %0,
+ i32 %1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
+ <vscale x 8 x half>*,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
+ <vscale x 8 x half>* %0,
+ i32 %1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
+ <vscale x 16 x half>*,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
+ <vscale x 16 x half>* %0,
+ i32 %1)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
+ <vscale x 32 x half>*,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv32f16_nxv32f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
+ <vscale x 32 x half>* %0,
+ i32 %1)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
+ <vscale x 1 x i8>*,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
+ <vscale x 1 x i8>* %0,
+ i32 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
+ <vscale x 2 x i8>*,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
+ <vscale x 2 x i8>* %0,
+ i32 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
+ <vscale x 4 x i8>*,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
+ <vscale x 4 x i8>* %0,
+ i32 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
+ <vscale x 8 x i8>*,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
+ <vscale x 8 x i8>* %0,
+ i32 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
+ <vscale x 16 x i8>*,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
+ <vscale x 16 x i8>* %0,
+ i32 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
+ <vscale x 32 x i8>*,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
+ <vscale x 32 x i8>* %0,
+ i32 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
+ <vscale x 64 x i8>*,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
+ <vscale x 64 x i8>* %0,
+ i32 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 64 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
new file mode 100644
index 000000000000..1726b07053ed
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
@@ -0,0 +1,1333 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
+ <vscale x 1 x i64>*,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
+ <vscale x 1 x i64>* %0,
+ i64 %1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
+ <vscale x 2 x i64>*,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
+ <vscale x 2 x i64>* %0,
+ i64 %1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
+ <vscale x 4 x i64>*,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
+ <vscale x 4 x i64>* %0,
+ i64 %1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
+ <vscale x 8 x i64>*,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
+ <vscale x 8 x i64>* %0,
+ i64 %1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
+ <vscale x 1 x double>*,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
+ <vscale x 1 x double>* %0,
+ i64 %1)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
+ <vscale x 2 x double>*,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
+ <vscale x 2 x double>* %0,
+ i64 %1)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
+ <vscale x 4 x double>*,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
+ <vscale x 4 x double>* %0,
+ i64 %1)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
+ <vscale x 8 x double>*,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
+ <vscale x 8 x double>* %0,
+ i64 %1)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vle64.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+ <vscale x 1 x i32>*,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
+ <vscale x 1 x i32>* %0,
+ i64 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
+ <vscale x 2 x i32>*,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
+ <vscale x 2 x i32>* %0,
+ i64 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
+ <vscale x 4 x i32>*,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
+ <vscale x 4 x i32>* %0,
+ i64 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
+ <vscale x 8 x i32>*,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
+ <vscale x 8 x i32>* %0,
+ i64 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
+ <vscale x 16 x i32>*,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
+ <vscale x 16 x i32>* %0,
+ i64 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
+ <vscale x 1 x float>*,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
+ <vscale x 1 x float>* %0,
+ i64 %1)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
+ <vscale x 2 x float>*,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
+ <vscale x 2 x float>* %0,
+ i64 %1)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
+ <vscale x 4 x float>*,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
+ <vscale x 4 x float>* %0,
+ i64 %1)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
+ <vscale x 8 x float>*,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
+ <vscale x 8 x float>* %0,
+ i64 %1)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
+ <vscale x 16 x float>*,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
+ <vscale x 16 x float>* %0,
+ i64 %1)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vle32.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
+ <vscale x 1 x i16>*,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
+ <vscale x 1 x i16>* %0,
+ i64 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
+ <vscale x 2 x i16>*,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
+ <vscale x 2 x i16>* %0,
+ i64 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
+ <vscale x 4 x i16>*,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
+ <vscale x 4 x i16>* %0,
+ i64 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
+ <vscale x 8 x i16>*,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
+ <vscale x 8 x i16>* %0,
+ i64 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
+ <vscale x 16 x i16>*,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
+ <vscale x 16 x i16>* %0,
+ i64 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
+ <vscale x 32 x i16>*,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
+ <vscale x 32 x i16>* %0,
+ i64 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
+ <vscale x 1 x half>*,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
+ <vscale x 1 x half>* %0,
+ i64 %1)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
+ <vscale x 2 x half>*,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
+ <vscale x 2 x half>* %0,
+ i64 %1)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
+ <vscale x 4 x half>*,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
+ <vscale x 4 x half>* %0,
+ i64 %1)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
+ <vscale x 8 x half>*,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
+ <vscale x 8 x half>* %0,
+ i64 %1)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
+ <vscale x 16 x half>*,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
+ <vscale x 16 x half>* %0,
+ i64 %1)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
+ <vscale x 32 x half>*,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv32f16_nxv32f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
+ <vscale x 32 x half>* %0,
+ i64 %1)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vle16.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
+ <vscale x 1 x i8>*,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
+ <vscale x 1 x i8>* %0,
+ i64 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
+ <vscale x 2 x i8>*,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
+ <vscale x 2 x i8>* %0,
+ i64 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
+ <vscale x 4 x i8>*,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
+ <vscale x 4 x i8>* %0,
+ i64 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
+ <vscale x 8 x i8>*,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
+ <vscale x 8 x i8>* %0,
+ i64 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
+ <vscale x 16 x i8>*,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
+ <vscale x 16 x i8>* %0,
+ i64 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
+ <vscale x 32 x i8>*,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
+ <vscale x 32 x i8>* %0,
+ i64 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
+ <vscale x 64 x i8>*,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0)
+ %a = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
+ <vscale x 64 x i8>* %0,
+ i64 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vle8.v {{v[0-9]+}}, (a0), v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 64 x i8> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
new file mode 100644
index 000000000000..f9d8f9557c54
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
@@ -0,0 +1,1103 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vse.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv16f32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv16f32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv32f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv32f16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv32f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv32f16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ i32);
+
+define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ i32 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i1>,
+ i32);
+
+define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i1> %2,
+ i32 %3)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
new file mode 100644
index 000000000000..e49eed85d5b0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
@@ -0,0 +1,1407 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vse.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1f64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1f64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2f64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2f64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4f64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4f64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8f64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8f64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8f64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vse64.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8f64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1f32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1f32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2f32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2f32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4f32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4f32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8f32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f32_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8f32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv16f32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16f32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vse32.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv16f32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1f16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1f16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2f16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2f16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4f16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4f16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8f16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8f16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16f16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv16f16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv32f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv32f16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv32f16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vse16.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv32f16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ i64);
+
+define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0)
+ call void @llvm.riscv.vse.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ i64 %2)
+
+ ret void
+}
+
+declare void @llvm.riscv.vse.mask.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i1>,
+ i64);
+
+define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vse8.v {{v[0-9]+}}, (a0), v0.t
+ call void @llvm.riscv.vse.mask.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i1> %2,
+ i64 %3)
+
+ ret void
+}