[llvm-branch-commits] [llvm] 9cf3b1b - [RISCV] Define vlxe/vsxe/vsuxe intrinsics.
Zakk Chen via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Sat Dec 19 06:56:16 PST 2020
Author: Zakk Chen
Date: 2020-12-19T06:50:20-08:00
New Revision: 9cf3b1b66650610f22db1c1a4514a860c84c4daa
URL: https://github.com/llvm/llvm-project/commit/9cf3b1b66650610f22db1c1a4514a860c84c4daa
DIFF: https://github.com/llvm/llvm-project/commit/9cf3b1b66650610f22db1c1a4514a860c84c4daa.diff
LOG: [RISCV] Define vlxe/vsxe/vsuxe intrinsics.
Define the vlxe/vsxe/vsuxe intrinsics and lower them to the
vlxei<EEW>/vsxei<EEW>/vsuxei<EEW> instructions.
This patch was developed in collaboration with @rogfer01 from BSC.
Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Zakk Chen <zakk.chen at sifive.com>
Differential Revision: https://reviews.llvm.org/D93471
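
For reference, here is a minimal LLVM IR sketch mirroring the added RV32 tests
(the function name is illustrative), showing one of the new intrinsics and the
instruction it is expected to lower to:

  declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i32(
    <vscale x 1 x i8>*, <vscale x 1 x i32>, i32)

  define <vscale x 1 x i8> @example(<vscale x 1 x i8>* %base, <vscale x 1 x i32> %index, i32 %vl) nounwind {
    ; Per the added tests, this lowers to:
    ;   vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
    ;   vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
    %v = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i32(
      <vscale x 1 x i8>* %base, <vscale x 1 x i32> %index, i32 %vl)
    ret <vscale x 1 x i8> %v
  }
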
Added:
llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index a6fbccdc75a0..c5f2dacb100a 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -125,6 +125,21 @@ let TargetPrefix = "riscv" in {
LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
[NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
+ // For indexed load
+ // Input: (pointer, index, vl)
+ class RISCVILoad
+ : Intrinsic<[llvm_anyvector_ty],
+ [LLVMPointerType<LLVMMatchType<0>>,
+ llvm_anyvector_ty, llvm_anyint_ty],
+ [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
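+  // For example, the RV32 tests instantiate this as
+  // llvm.riscv.vlxe.nxv1i8.nxv1i32(<vscale x 1 x i8>*, <vscale x 1 x i32>, i32).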
+ // For indexed load with mask
+ // Input: (maskedoff, pointer, index, mask, vl)
+ class RISCVILoadMask
+        : Intrinsic<[llvm_anyvector_ty],
+ [LLVMMatchType<0>,
+ LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+ [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
// For unit stride store
// Input: (vector_in, pointer, vl)
class RISCVUSStore
@@ -158,6 +173,22 @@ let TargetPrefix = "riscv" in {
LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
[NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+ // For indexed store
+ // Input: (vector_in, pointer, index, vl)
+ class RISCVIStore
+ : Intrinsic<[],
+ [llvm_anyvector_ty,
+                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
+                     llvm_anyint_ty],
+ [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+ // For indexed store with mask
+ // Input: (vector_in, pointer, index, mask, vl)
+ class RISCVIStoreMask
+ : Intrinsic<[],
+ [llvm_anyvector_ty,
+ LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+ [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
// For destination vector type is the same as first source vector.
// Input: (vector_in, vector_in/scalar_in, vl)
class RISCVBinaryAAXNoMask
@@ -260,6 +291,10 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVSLoad;
def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
}
+ multiclass RISCVILoad {
+ def "int_riscv_" # NAME : RISCVILoad;
+ def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
+ }
multiclass RISCVUSStore {
def "int_riscv_" # NAME : RISCVUSStore;
def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
@@ -268,7 +303,10 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVSStore;
def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
}
-
+ multiclass RISCVIStore {
+ def "int_riscv_" # NAME : RISCVIStore;
+ def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
+ }
multiclass RISCVBinaryAAX {
def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
@@ -295,6 +333,9 @@ let TargetPrefix = "riscv" in {
defm vse : RISCVUSStore;
defm vlse: RISCVSLoad;
defm vsse: RISCVSStore;
+ defm vlxe: RISCVILoad;
+ defm vsxe: RISCVIStore;
+ defm vsuxe: RISCVIStore;
defm vadd : RISCVBinaryAAX;
defm vsub : RISCVBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 92340785d861..8c5973a19e4b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -347,6 +347,39 @@ class VPseudoSLoadMask<VReg RetClass>:
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
+class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass>:
+ Pseudo<(outs RetClass:$rd),
+ (ins GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
+ RISCVVPseudo {
+ let mayLoad = 1;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let usesCustomInserter = 1;
+ let Uses = [VL, VTYPE];
+ let VLIndex = 3;
+ let SEWIndex = 4;
+ let HasDummyMask = 1;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoILoadMask<VReg RetClass, VReg IdxClass>:
+ Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+ (ins GetVRegNoV0<RetClass>.R:$merge,
+ GPR:$rs1, IdxClass:$rs2,
+ VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+ RISCVVPseudo {
+ let mayLoad = 1;
+ let mayStore = 0;
+ let hasSideEffects = 0;
+ let usesCustomInserter = 1;
+ let Constraints = "$rd = $merge";
+ let Uses = [VL, VTYPE];
+ let VLIndex = 5;
+ let SEWIndex = 6;
+ let MergeOpIndex = 1;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
class VPseudoUSStoreNoMask<VReg StClass>:
Pseudo<(outs),
(ins StClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
@@ -440,6 +473,35 @@ class VPseudoBinaryNoMask<VReg RetClass,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
+class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass>:
+ Pseudo<(outs),
+ (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+ let usesCustomInserter = 1;
+ let Uses = [VL, VTYPE];
+ let VLIndex = 3;
+ let SEWIndex = 4;
+ let HasDummyMask = 1;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoIStoreMask<VReg StClass, VReg IdxClass>:
+ Pseudo<(outs),
+ (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+ RISCVVPseudo {
+ let mayLoad = 0;
+ let mayStore = 1;
+ let hasSideEffects = 0;
+ let usesCustomInserter = 1;
+ let Uses = [VL, VTYPE];
+ let VLIndex = 4;
+ let SEWIndex = 5;
+ let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
class VPseudoBinaryMask<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
@@ -508,6 +570,20 @@ multiclass VPseudoSLoad {
}
}
+multiclass VPseudoILoad {
+ foreach lmul = MxList.m in
+ foreach idx_lmul = MxList.m in {
+ defvar LInfo = lmul.MX;
+ defvar Vreg = lmul.vrclass;
+ defvar IdxLInfo = idx_lmul.MX;
+ defvar IdxVreg = idx_lmul.vrclass;
+ let VLMul = lmul.value in {
+ def "_V_" # IdxLInfo # "_" # LInfo : VPseudoILoadNoMask<Vreg, IdxVreg>;
+ def "_V_" # IdxLInfo # "_" # LInfo # "_MASK" : VPseudoILoadMask<Vreg, IdxVreg>;
+ }
+ }
+}
+
multiclass VPseudoUSStore {
foreach lmul = MxList.m in {
defvar LInfo = lmul.MX;
@@ -530,6 +606,20 @@ multiclass VPseudoSStore {
}
}
+multiclass VPseudoIStore {
+ foreach lmul = MxList.m in
+ foreach idx_lmul = MxList.m in {
+ defvar LInfo = lmul.MX;
+ defvar Vreg = lmul.vrclass;
+ defvar IdxLInfo = idx_lmul.MX;
+ defvar IdxVreg = idx_lmul.vrclass;
+ let VLMul = lmul.value in {
+ def "_V_" # IdxLInfo # "_" # LInfo : VPseudoIStoreNoMask<Vreg, IdxVreg>;
+ def "_V_" # IdxLInfo # "_" # LInfo # "_MASK" : VPseudoIStoreMask<Vreg, IdxVreg>;
+ }
+ }
+}
+
multiclass VPseudoBinary<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
@@ -854,6 +944,31 @@ multiclass VPatSLoad<string intrinsic,
$rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
}
+multiclass VPatILoad<string intrinsic,
+ string inst,
+ LLVMType type,
+ LLVMType idx_type,
+ LLVMType mask_type,
+ int sew,
+ LMULInfo vlmul,
+ LMULInfo idx_vlmul,
+ VReg reg_class,
+ VReg idx_reg_class>
+{
+ defvar Intr = !cast<Intrinsic>(intrinsic);
+ defvar Pseudo = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX);
+ def : Pat<(type (Intr GPR:$rs1, (idx_type idx_reg_class:$rs2), GPR:$vl)),
+ (Pseudo $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+
+ defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
+ defvar PseudoMask = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX#"_MASK");
+ def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
+ GPR:$rs1, (idx_type idx_reg_class:$rs2),
+ (mask_type V0), GPR:$vl)),
+ (PseudoMask $merge,
+ $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
multiclass VPatUSStore<string intrinsic,
string inst,
LLVMType type,
@@ -890,6 +1005,29 @@ multiclass VPatSStore<string intrinsic,
(PseudoMask $rs3, $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
}
+multiclass VPatIStore<string intrinsic,
+ string inst,
+ LLVMType type,
+ LLVMType idx_type,
+ LLVMType mask_type,
+ int sew,
+ LMULInfo vlmul,
+ LMULInfo idx_vlmul,
+ VReg reg_class,
+ VReg idx_reg_class>
+{
+ defvar Intr = !cast<Intrinsic>(intrinsic);
+ defvar Pseudo = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX);
+ def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1,
+ (idx_type idx_reg_class:$rs2), GPR:$vl),
+ (Pseudo $rs3, $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+ defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
+ defvar PseudoMask = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX#"_MASK");
+ def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1,
+ (idx_type idx_reg_class:$rs2), (mask_type V0), GPR:$vl),
+ (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
multiclass VPatBinary<string intrinsic,
string inst,
string kind,
@@ -1243,6 +1381,17 @@ foreach eew = EEWList in {
defm PseudoVSSE # eew : VPseudoSStore;
}
+//===----------------------------------------------------------------------===//
+// 7.6 Vector Indexed Instructions
+//===----------------------------------------------------------------------===//
+
+// Vector Indexed Loads and Stores
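+// For example, with eew=32 this defines pseudos such as PseudoVLXEI32_V_M4_M1,
+// where M4 is the index register group EMUL and M1 the data register group LMUL.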
+foreach eew = EEWList in {
+ defm PseudoVLXEI # eew : VPseudoILoad;
+ defm PseudoVSXEI # eew : VPseudoIStore;
+ defm PseudoVSUXEI # eew : VPseudoIStore;
+}
+
//===----------------------------------------------------------------------===//
// Pseudo Instructions
//===----------------------------------------------------------------------===//
@@ -1448,6 +1597,55 @@ foreach vti = AllVectors in
vti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass>;
}
+//===----------------------------------------------------------------------===//
+// 7.6 Vector Indexed Instructions
+//===----------------------------------------------------------------------===//
+
+foreach vti = AllVectors in
+foreach eew = EEWList in {
+ defvar vlmul = vti.LMul;
+ defvar octuple_lmul = !cond(!eq(vti.LMul.MX, "MF8") : 1,
+ !eq(vti.LMul.MX, "MF4") : 2,
+ !eq(vti.LMul.MX, "MF2") : 4,
+ !eq(vti.LMul.MX, "M1") : 8,
+ !eq(vti.LMul.MX, "M2") : 16,
+ !eq(vti.LMul.MX, "M4") : 32,
+ !eq(vti.LMul.MX, "M8") : 64);
+ defvar log_sew = shift_amount<vti.SEW>.val;
+  // The data vector register group has EEW=SEW and EMUL=LMUL, while the offset
+  // vector register group has its EEW encoded in the instruction and
+  // EMUL=(EEW/SEW)*LMUL.
+  // Calculate the octuple ELMUL, which is (eew * octuple_lmul) >> log_sew.
+  defvar octuple_elmul = !srl(!mul(eew, octuple_lmul), log_sew);
+  // A legal octuple ELMUL must be greater than 0 and less than or equal to 64.
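+  // For example, SEW=8 (log_sew=3) with LMUL=M1 (octuple_lmul=8) and EEW=32
+  // gives octuple_elmul = (32 * 8) >> 3 = 32, i.e. an index EMUL of M4.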
+ if !gt(octuple_elmul, 0) then {
+ if !le(octuple_elmul, 64) then {
+ defvar log_elmul = shift_amount<octuple_elmul>.val;
+ // 0, 1, 2 -> V_MF8 ~ V_MF2
+ // 3, 4, 5, 6 -> V_M1 ~ V_M8
+ defvar elmul_str = !if(!eq(log_elmul, 0), "MF8",
+ !if(!eq(log_elmul, 1), "MF4",
+ !if(!eq(log_elmul, 2), "MF2",
+ "M" # !cast<string>(!shl(1, !add(log_elmul, -3))))));
+    defvar elmul = !cast<LMULInfo>("V_" # elmul_str);
+ defvar idx_vti = !cast<VTypeInfo>("VI" # eew # elmul_str);
+
+ defm : VPatILoad<"int_riscv_vlxe",
+ "PseudoVLXEI"#eew,
+ vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
+ vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
+ defm : VPatIStore<"int_riscv_vsxe",
+ "PseudoVSXEI"#eew,
+ vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
+ vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
+ defm : VPatIStore<"int_riscv_vsuxe",
+ "PseudoVSUXEI"#eew,
+ vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
+ vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
+ }
+ }
+}
+
+
//===----------------------------------------------------------------------===//
// 12. Vector Integer Arithmetic Instructions
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll
new file mode 100644
index 000000000000..d9b23e17a86d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll
@@ -0,0 +1,3281 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>* %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>* %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half>* %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>* %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vlxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vlxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>* %0,
+ <vscale x 64 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>* %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half>* %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll
new file mode 100644
index 000000000000..5ecb7d502942
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll
@@ -0,0 +1,5361 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+experimental-zfh,+f,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i64(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i64(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i64(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i64(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i64(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i64(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i64(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i64(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i64(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i64(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i64(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i64(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i64(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i64(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i64(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i64(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i64(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i64(
+ <vscale x 1 x double>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i64(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i64(
+ <vscale x 2 x double>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i64(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i64(
+ <vscale x 4 x double>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i64(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i64(
+ <vscale x 8 x double>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i32(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i32(
+ <vscale x 1 x double>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i32(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i32(
+ <vscale x 2 x double>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i32(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i32(
+ <vscale x 4 x double>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i32(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i32(
+ <vscale x 8 x double>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>* %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>* %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half>* %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i16(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i16(
+ <vscale x 1 x double>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i16(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i16(
+ <vscale x 2 x double>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i16(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i16(
+ <vscale x 4 x double>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i16(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i16(
+ <vscale x 8 x double>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>* %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vlxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vlxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>* %0,
+ <vscale x 64 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>* %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half>* %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i8(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i8(
+ <vscale x 1 x double>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i8(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i8(
+ <vscale x 2 x double>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i8(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i8(
+ <vscale x 4 x double>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i8(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ %a = call <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i8(
+ <vscale x 8 x double>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll
new file mode 100644
index 000000000000..f4d4fc7fe621
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll
@@ -0,0 +1,3445 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll
new file mode 100644
index 000000000000..2a766d5d2f35
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll
@@ -0,0 +1,5629 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsuxe.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll
new file mode 100644
index 000000000000..7795b72f499b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll
@@ -0,0 +1,3445 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
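+;
+; Each pair of tests below follows the same pattern: the unmasked case calls
+; @llvm.riscv.vsxe.<data>.<index> and checks that a vsetvli carrying the data
+; type's SEW/LMUL is followed by a vsxei<EEW>.v whose EEW matches the index
+; element width; the masked case additionally passes a <vscale x N x i1> mask
+; and checks for the trailing ", v0.t" operand.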
+declare void @llvm.riscv.vsxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll
new file mode 100644
index 000000000000..d7d7b0d319b0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll
@@ -0,0 +1,5629 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsxe.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+ call void @llvm.riscv.vsxe.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+ call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}