[llvm-branch-commits] [llvm] b7ab672 - [RISCV] New vector load/store in V extension v1.0
Hsiangkai Wang via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Thu Jan 21 15:35:11 PST 2021
Author: Hsiangkai Wang
Date: 2021-01-22T07:30:09+08:00
New Revision: b7ab6726b6de9608896fce4372b30b4fd50b0a2a
URL: https://github.com/llvm/llvm-project/commit/b7ab6726b6de9608896fce4372b30b4fd50b0a2a
DIFF: https://github.com/llvm/llvm-project/commit/b7ab6726b6de9608896fce4372b30b4fd50b0a2a.diff
LOG: [RISCV] New vector load/store in V extension v1.0
Upgrade the RISC-V V extension to v1.0-08a0b46.
Indexed loads and stores now have ordered and unordered forms.
Add new whole register vector load/store instructions.
Differential Revision: https://reviews.llvm.org/D93614
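
For reference, a minimal assembly sketch of the renamed and new mnemonics this change introduces (register and index operands are illustrative, not taken from the commit):

    vluxei32.v v8, (a0), v4        # indexed load, unordered, 32-bit index elements
    vloxei32.v v8, (a0), v4, v0.t  # indexed load, ordered, masked
    vsuxei32.v v8, (a0), v4        # indexed store, unordered
    vsoxei32.v v8, (a0), v4        # indexed store, ordered
    vl1re8.v v8, (a0)              # whole register load, 1 register, EEW=8
    vl1r.v v8, (a0)                # alias for vl1re8.v
    vs1r.v v8, (a0)                # whole register store, 1 register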
Added:
    llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll

Modified:
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrFormatsV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/test/MC/RISCV/rvv/aliases.s
    llvm/test/MC/RISCV/rvv/load.s
    llvm/test/MC/RISCV/rvv/store.s
    llvm/test/MC/RISCV/rvv/zvlsseg.s

Removed:
    llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index f4d7b84c00f5..38828baead12 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -684,9 +684,9 @@ let TargetPrefix = "riscv" in {
defm vse : RISCVUSStore;
defm vlse: RISCVSLoad;
defm vsse: RISCVSStore;
- defm vlxe: RISCVILoad;
- defm vsxe: RISCVIStore;
- defm vsuxe: RISCVIStore;
+ defm vloxei: RISCVILoad;
+ defm vsoxei: RISCVIStore;
+ defm vsuxei: RISCVIStore;
defm vamoswap : RISCVAMO;
defm vamoadd : RISCVAMO;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td b/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td
index 030571a370fd..147993127e78 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormatsV.td
@@ -24,9 +24,10 @@ def OPMVX : RISCVVFormat<0b110>;
class RISCVMOP<bits<2> val> {
bits<2> Value = val;
}
-def MOPLDUnitStride : RISCVMOP<0b00>;
-def MOPLDStrided : RISCVMOP<0b10>;
-def MOPLDIndexed : RISCVMOP<0b11>;
+def MOPLDUnitStride : RISCVMOP<0b00>;
+def MOPLDIndexedUnord : RISCVMOP<0b01>;
+def MOPLDStrided : RISCVMOP<0b10>;
+def MOPLDIndexedOrder : RISCVMOP<0b11>;
def MOPSTUnitStride : RISCVMOP<0b00>;
def MOPSTIndexedUnord : RISCVMOP<0b01>;
@@ -242,7 +243,7 @@ class RVInstVLS<bits<3> nf, bit mew, bits<3> width,
let RVVConstraint = VMConstraint;
}
-class RVInstVLX<bits<3> nf, bit mew, bits<3> width,
+class RVInstVLX<bits<3> nf, bit mew, RISCVMOP mop, bits<3> width,
dag outs, dag ins, string opcodestr, string argstr>
: RVInst<outs, ins, opcodestr, argstr, [], InstFormatR> {
bits<5> vs2;
@@ -252,7 +253,7 @@ class RVInstVLX<bits<3> nf, bit mew, bits<3> width,
let Inst{31-29} = nf;
let Inst{28} = mew;
- let Inst{27-26} = MOPLDIndexed.Value;
+ let Inst{27-26} = mop.Value;
let Inst{25} = vm;
let Inst{24-20} = vs2;
let Inst{19-15} = rs1;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index e7962b921278..4f9e9cfbdb98 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -97,16 +97,16 @@ class VStridedLoad<RISCVWidth width, string opcodestr>
"$vd, (${rs1}), $rs2$vm">;
// load vd, (rs1), vs2, vm
-class VIndexedLoad<RISCVWidth width, string opcodestr>
- : RVInstVLX<0b000, width.Value{3}, width.Value{2-0},
+class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
+ : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
(outs VR:$vd),
(ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
"$vd, (${rs1}), $vs2$vm">;
// vl<nf>r.v vd, (rs1)
-class VWholeLoad<bits<3> nf, string opcodestr>
- : RVInstVLU<nf, 0b0, LUMOPUnitStrideWholeReg,
- 0b000, (outs VR:$vd), (ins GPR:$rs1),
+class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr>
+ : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
+ width.Value{2-0}, (outs VR:$vd), (ins GPR:$rs1),
opcodestr, "$vd, (${rs1})"> {
let vm = 1;
let Uses = [];
@@ -128,8 +128,9 @@ class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
"$vd, (${rs1}), $rs2$vm">;
// segment load vd, (rs1), vs2, vm
-class VIndexedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
- : RVInstVLX<nf, width.Value{3}, width.Value{2-0},
+class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
+ string opcodestr>
+ : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
(outs VR:$vd),
(ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
"$vd, (${rs1}), $vs2$vm">;
@@ -157,7 +158,7 @@ class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
// vs<nf>r.v vd, (rs1)
class VWholeStore<bits<3> nf, string opcodestr>
- : RVInstVSU<nf, 0b0, SUMOPUnitStrideWholeReg,
+ : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
0b000, (outs), (ins VR:$vs3, GPR:$rs1),
opcodestr, "$vs3, (${rs1})"> {
let vm = 1;
@@ -177,8 +178,9 @@ class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
opcodestr, "$vs3, (${rs1}), $rs2$vm">;
// segment store vd, vs3, (rs1), vs2, vm
-class VIndexedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
- : RVInstVSX<nf, width.Value{3}, MOPSTIndexedOrder, width.Value{2-0}, (outs),
+class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
+ string opcodestr>
+ : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
(ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
opcodestr, "$vs3, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1
@@ -416,6 +418,17 @@ multiclass VAMO<RISCVAMOOP amoop, RISCVWidth width, string opcodestr> {
def _UNWD : VAMONoWd<amoop, width, opcodestr>;
}
+multiclass VWholeLoad<bits<3> nf, string opcodestr> {
+ def E8_V : VWholeLoad<nf, LSWidth8, opcodestr # "e8.v">;
+ def E16_V : VWholeLoad<nf, LSWidth16, opcodestr # "e16.v">;
+ def E32_V : VWholeLoad<nf, LSWidth32, opcodestr # "e32.v">;
+ def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v">;
+ def E128_V : VWholeLoad<nf, LSWidth128, opcodestr # "e128.v">;
+ def E256_V : VWholeLoad<nf, LSWidth256, opcodestr # "e256.v">;
+ def E512_V : VWholeLoad<nf, LSWidth512, opcodestr # "e512.v">;
+ def E1024_V : VWholeLoad<nf, LSWidth1024, opcodestr # "e1024.v">;
+}
+
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
@@ -477,35 +490,39 @@ def VSSE512_V : VStridedStore<LSWidth512, "vsse512.v">;
def VSSE1024_V : VStridedStore<LSWidth1024, "vsse1024.v">;
// Vector Indexed Instructions
-def VLXEI8_V : VIndexedLoad<LSWidth8, "vlxei8.v">;
-def VLXEI16_V : VIndexedLoad<LSWidth16, "vlxei16.v">;
-def VLXEI32_V : VIndexedLoad<LSWidth32, "vlxei32.v">;
-def VLXEI64_V : VIndexedLoad<LSWidth64, "vlxei64.v">;
-def VLXEI128_V : VIndexedLoad<LSWidth128, "vlxei128.v">;
-def VLXEI256_V : VIndexedLoad<LSWidth256, "vlxei256.v">;
-def VLXEI512_V : VIndexedLoad<LSWidth512, "vlxei512.v">;
-def VLXEI1024_V : VIndexedLoad<LSWidth1024, "vlxei1024.v">;
-
-def VSXEI8_V : VIndexedStore<MOPSTIndexedOrder, LSWidth8, "vsxei8.v">;
-def VSXEI16_V : VIndexedStore<MOPSTIndexedOrder, LSWidth16, "vsxei16.v">;
-def VSXEI32_V : VIndexedStore<MOPSTIndexedOrder, LSWidth32, "vsxei32.v">;
-def VSXEI64_V : VIndexedStore<MOPSTIndexedOrder, LSWidth64, "vsxei64.v">;
-def VSXEI128_V : VIndexedStore<MOPSTIndexedOrder, LSWidth128, "vsxei128.v">;
-def VSXEI256_V : VIndexedStore<MOPSTIndexedOrder, LSWidth256, "vsxei256.v">;
-def VSXEI512_V : VIndexedStore<MOPSTIndexedOrder, LSWidth512, "vsxei512.v">;
-def VSXEI1024_V : VIndexedStore<MOPSTIndexedOrder, LSWidth1024, "vsxei1024.v">;
+def VLUXEI8_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth8, "vluxei8.v">;
+def VLUXEI16_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth16, "vluxei16.v">;
+def VLUXEI32_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth32, "vluxei32.v">;
+def VLUXEI64_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth64, "vluxei64.v">;
+
+def VLOXEI8_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth8, "vloxei8.v">;
+def VLOXEI16_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth16, "vloxei16.v">;
+def VLOXEI32_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth32, "vloxei32.v">;
+def VLOXEI64_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth64, "vloxei64.v">;
def VSUXEI8_V : VIndexedStore<MOPSTIndexedUnord, LSWidth8, "vsuxei8.v">;
def VSUXEI16_V : VIndexedStore<MOPSTIndexedUnord, LSWidth16, "vsuxei16.v">;
def VSUXEI32_V : VIndexedStore<MOPSTIndexedUnord, LSWidth32, "vsuxei32.v">;
def VSUXEI64_V : VIndexedStore<MOPSTIndexedUnord, LSWidth64, "vsuxei64.v">;
-def VSUXEI128_V : VIndexedStore<MOPSTIndexedUnord, LSWidth128, "vsuxei128.v">;
-def VSUXEI256_V : VIndexedStore<MOPSTIndexedUnord, LSWidth256, "vsuxei256.v">;
-def VSUXEI512_V : VIndexedStore<MOPSTIndexedUnord, LSWidth512, "vsuxei512.v">;
-def VSUXEI1024_V : VIndexedStore<MOPSTIndexedUnord, LSWidth1024, "vsuxei1024.v">;
-def VL1R_V : VWholeLoad<0, "vl1r.v">;
-def VS1R_V : VWholeStore<0, "vs1r.v">;
+def VSOXEI8_V : VIndexedStore<MOPSTIndexedOrder, LSWidth8, "vsoxei8.v">;
+def VSOXEI16_V : VIndexedStore<MOPSTIndexedOrder, LSWidth16, "vsoxei16.v">;
+def VSOXEI32_V : VIndexedStore<MOPSTIndexedOrder, LSWidth32, "vsoxei32.v">;
+def VSOXEI64_V : VIndexedStore<MOPSTIndexedOrder, LSWidth64, "vsoxei64.v">;
+
+defm VL1R : VWholeLoad<1, "vl1r">;
+defm VL2R : VWholeLoad<2, "vl2r">;
+defm VL4R : VWholeLoad<4, "vl4r">;
+defm VL8R : VWholeLoad<8, "vl8r">;
+def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
+def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VR:$vd, GPR:$rs1)>;
+def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VR:$vd, GPR:$rs1)>;
+def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VR:$vd, GPR:$rs1)>;
+
+def VS1R_V : VWholeStore<1, "vs1r.v">;
+def VS2R_V : VWholeStore<2, "vs2r.v">;
+def VS4R_V : VWholeStore<4, "vs4r.v">;
+def VS8R_V : VWholeStore<8, "vs8r.v">;
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
@@ -1084,23 +1101,73 @@ let Predicates = [HasStdExtZvlsseg] in {
def VSSSEG#nf#E1024_V : VStridedSegmentStore<!add(nf, -1), LSWidth1024, "vssseg"#nf#"e1024.v">;
// Vector Indexed Instructions
- def VLXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), LSWidth8, "vlxseg"#nf#"ei8.v">;
- def VLXSEG#nf#EI16_V : VIndexedSegmentLoad<!add(nf, -1), LSWidth16, "vlxseg"#nf#"ei16.v">;
- def VLXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), LSWidth32, "vlxseg"#nf#"ei32.v">;
- def VLXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), LSWidth64, "vlxseg"#nf#"ei64.v">;
- def VLXSEG#nf#EI128_V : VIndexedSegmentLoad<!add(nf, -1), LSWidth128, "vlxseg"#nf#"ei128.v">;
- def VLXSEG#nf#EI256_V : VIndexedSegmentLoad<!add(nf, -1), LSWidth256, "vlxseg"#nf#"ei256.v">;
- def VLXSEG#nf#EI512_V : VIndexedSegmentLoad<!add(nf, -1), LSWidth512, "vlxseg"#nf#"ei512.v">;
- def VLXSEG#nf#EI1024_V : VIndexedSegmentLoad<!add(nf, -1), LSWidth1024, "vlxseg"#nf#"ei1024.v">;
-
- def VSXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), LSWidth8, "vsxseg"#nf#"ei8.v">;
- def VSXSEG#nf#EI16_V : VIndexedSegmentStore<!add(nf, -1), LSWidth16, "vsxseg"#nf#"ei16.v">;
- def VSXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), LSWidth32, "vsxseg"#nf#"ei32.v">;
- def VSXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), LSWidth64, "vsxseg"#nf#"ei64.v">;
- def VSXSEG#nf#EI128_V : VIndexedSegmentStore<!add(nf, -1), LSWidth128, "vsxseg"#nf#"ei128.v">;
- def VSXSEG#nf#EI256_V : VIndexedSegmentStore<!add(nf, -1), LSWidth256, "vsxseg"#nf#"ei256.v">;
- def VSXSEG#nf#EI512_V : VIndexedSegmentStore<!add(nf, -1), LSWidth512, "vsxseg"#nf#"ei512.v">;
- def VSXSEG#nf#EI1024_V : VIndexedSegmentStore<!add(nf, -1), LSWidth1024, "vsxseg"#nf#"ei1024.v">;
+ def VLUXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
+ LSWidth8, "vluxseg"#nf#"ei8.v">;
+ def VLUXSEG#nf#EI16_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
+ LSWidth16, "vluxseg"#nf#"ei16.v">;
+ def VLUXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
+ LSWidth32, "vluxseg"#nf#"ei32.v">;
+ def VLUXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
+ LSWidth64, "vluxseg"#nf#"ei64.v">;
+ def VLUXSEG#nf#EI128_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
+ LSWidth128, "vluxseg"#nf#"ei128.v">;
+ def VLUXSEG#nf#EI256_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
+ LSWidth256, "vluxseg"#nf#"ei256.v">;
+ def VLUXSEG#nf#EI512_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
+ LSWidth512, "vluxseg"#nf#"ei512.v">;
+ def VLUXSEG#nf#EI1024_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
+ LSWidth1024, "vluxseg"#nf#"ei1024.v">;
+
+ def VLOXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
+ LSWidth8, "vloxseg"#nf#"ei8.v">;
+ def VLOXSEG#nf#EI16_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
+ LSWidth16, "vloxseg"#nf#"ei16.v">;
+ def VLOXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
+ LSWidth32, "vloxseg"#nf#"ei32.v">;
+ def VLOXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
+ LSWidth64, "vloxseg"#nf#"ei64.v">;
+ def VLOXSEG#nf#EI128_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
+ LSWidth128, "vloxseg"#nf#"ei128.v">;
+ def VLOXSEG#nf#EI256_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
+ LSWidth256, "vloxseg"#nf#"ei256.v">;
+ def VLOXSEG#nf#EI512_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
+ LSWidth512, "vloxseg"#nf#"ei512.v">;
+ def VLOXSEG#nf#EI1024_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
+ LSWidth1024, "vloxseg"#nf#"ei1024.v">;
+
+ def VSUXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
+ LSWidth8, "vsuxseg"#nf#"ei8.v">;
+ def VSUXSEG#nf#EI16_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
+ LSWidth16, "vsuxseg"#nf#"ei16.v">;
+ def VSUXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
+ LSWidth32, "vsuxseg"#nf#"ei32.v">;
+ def VSUXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
+ LSWidth64, "vsuxseg"#nf#"ei64.v">;
+ def VSUXSEG#nf#EI128_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
+ LSWidth128, "vsuxseg"#nf#"ei128.v">;
+ def VSUXSEG#nf#EI256_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
+ LSWidth256, "vsuxseg"#nf#"ei256.v">;
+ def VSUXSEG#nf#EI512_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
+ LSWidth512, "vsuxseg"#nf#"ei512.v">;
+ def VSUXSEG#nf#EI1024_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
+ LSWidth1024, "vsuxseg"#nf#"ei1024.v">;
+
+ def VSOXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
+ LSWidth8, "vsoxseg"#nf#"ei8.v">;
+ def VSOXSEG#nf#EI16_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
+ LSWidth16, "vsoxseg"#nf#"ei16.v">;
+ def VSOXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
+ LSWidth32, "vsoxseg"#nf#"ei32.v">;
+ def VSOXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
+ LSWidth64, "vsoxseg"#nf#"ei64.v">;
+ def VSOXSEG#nf#EI128_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
+ LSWidth128, "vsoxseg"#nf#"ei128.v">;
+ def VSOXSEG#nf#EI256_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
+ LSWidth256, "vsoxseg"#nf#"ei256.v">;
+ def VSOXSEG#nf#EI512_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
+ LSWidth512, "vsoxseg"#nf#"ei512.v">;
+ def VSOXSEG#nf#EI1024_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
+ LSWidth1024, "vsoxseg"#nf#"ei1024.v">;
}
} // Predicates = [HasStdExtZvlsseg]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 1d909e6c1a39..029561cd2fff 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2857,8 +2857,8 @@ foreach eew = EEWList in {
// Vector Indexed Loads and Stores
foreach eew = EEWList in {
- defm PseudoVLXEI # eew : VPseudoILoad;
- defm PseudoVSXEI # eew : VPseudoIStore;
+ defm PseudoVLOXEI # eew : VPseudoILoad;
+ defm PseudoVSOXEI # eew : VPseudoIStore;
defm PseudoVSUXEI # eew : VPseudoIStore;
}
@@ -3430,15 +3430,15 @@ foreach eew = EEWList in {
defvar elmul =!cast<LMULInfo>("V_" # elmul_str);
defvar idx_vti = !cast<VTypeInfo>("VI" # eew # elmul_str);
- defm : VPatILoad<"int_riscv_vlxe",
- "PseudoVLXEI"#eew,
+ defm : VPatILoad<"int_riscv_vloxei",
+ "PseudoVLOXEI"#eew,
vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
- defm : VPatIStore<"int_riscv_vsxe",
- "PseudoVSXEI"#eew,
+ defm : VPatIStore<"int_riscv_vsoxei",
+ "PseudoVSOXEI"#eew,
vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
- defm : VPatIStore<"int_riscv_vsuxe",
+ defm : VPatIStore<"int_riscv_vsuxei",
"PseudoVSUXEI"#eew,
vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
new file mode 100644
index 000000000000..619624835c0f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
@@ -0,0 +1,4174 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>* %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>* %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>* %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>* %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>* %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>* %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>* %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>* %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>* %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>* %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>* %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>* %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>* %0,
+ <vscale x 64 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>* %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>* %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>* %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>* %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>* %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>* %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
new file mode 100644
index 000000000000..d54b7f858465
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
@@ -0,0 +1,5954 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m1,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m2,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m2,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double>* %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double>* %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double>* %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double>* %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>* %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>* %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>* %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>* %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei32.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>* %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>* %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>* %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>* %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>* %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>* %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>* %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei16.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>* %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>* %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>* %0,
+ <vscale x 64 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>* %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>* %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>* %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>* %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>* %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>* %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>* %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>* %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>* %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v16
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>* %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,tu,mu
+; CHECK-NEXT: vloxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll
deleted file mode 100644
index 0792865f2a6e..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll
+++ /dev/null
@@ -1,3281 +0,0 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN: --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8>* %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8>* %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8>* %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8>* %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8>* %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16>* %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16>* %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16>* %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16>* %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16>* %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32>* %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32>* %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32>* %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32>* %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32>* %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i32(
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i32(
- <vscale x 1 x half>* %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i32(
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i32(
- <vscale x 2 x half>* %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i32(
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i32(
- <vscale x 4 x half>* %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i32(
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i32(
- <vscale x 8 x half>* %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i32(
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i32(
- <vscale x 16 x half>* %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i32(
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i32(
- <vscale x 1 x float>* %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i32(
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i32(
- <vscale x 2 x float>* %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i32(
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i32(
- <vscale x 4 x float>* %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i32(
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i32(
- <vscale x 8 x float>* %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i32(
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i32(
- <vscale x 16 x float>* %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8>* %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8>* %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8>* %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8>* %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8>* %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8>* %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16>* %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16>* %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16>* %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16>* %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16>* %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16>* %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32>* %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32>* %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32>* %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32>* %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32>* %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i16(
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i16(
- <vscale x 1 x half>* %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i16(
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i16(
- <vscale x 2 x half>* %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i16(
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i16(
- <vscale x 4 x half>* %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i16(
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i16(
- <vscale x 8 x half>* %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i16(
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i16(
- <vscale x 16 x half>* %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i16(
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- i32);
-
-define <vscale x 32 x half> @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i16(
- <vscale x 32 x half>* %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i16(
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i16(
- <vscale x 1 x float>* %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i16(
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i16(
- <vscale x 2 x float>* %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i16(
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i16(
- <vscale x 4 x float>* %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i16(
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i16(
- <vscale x 8 x float>* %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i16(
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i16(
- <vscale x 16 x float>* %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8>* %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8>* %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8>* %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8>* %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8>* %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8>* %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vlxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 64 x i8> @llvm.riscv.vlxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8>* %0,
- <vscale x 64 x i8> %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 %4)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16>* %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16>* %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16>* %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16>* %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16>* %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16>* %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32>* %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32>* %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32>* %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32>* %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32>* %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i8(
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i8(
- <vscale x 1 x half>* %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i8(
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i8(
- <vscale x 2 x half>* %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i8(
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i8(
- <vscale x 4 x half>* %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i8(
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i8(
- <vscale x 8 x half>* %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i8(
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i8(
- <vscale x 16 x half>* %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i8(
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x half> @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i8(
- <vscale x 32 x half>* %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i8(
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i8(
- <vscale x 1 x float>* %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i8(
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i8(
- <vscale x 2 x float>* %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i8(
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i8(
- <vscale x 4 x float>* %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i8(
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i8(
- <vscale x 8 x float>* %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i8(
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i8(
- <vscale x 16 x float>* %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret <vscale x 16 x float> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll
deleted file mode 100644
index e76fbb0178be..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll
+++ /dev/null
@@ -1,5361 +0,0 @@
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN: --riscv-no-aliases < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i64(
- <vscale x 1 x i8>*,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i64(
- <vscale x 1 x i8>* %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i64(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i64(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i64(
- <vscale x 2 x i8>*,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i64(
- <vscale x 2 x i8>* %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i64(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i64(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i64(
- <vscale x 4 x i8>*,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i64(
- <vscale x 4 x i8>* %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i64(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i64(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i64(
- <vscale x 8 x i8>*,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i64(
- <vscale x 8 x i8>* %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i64(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i64(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i64(
- <vscale x 1 x i16>*,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i64(
- <vscale x 1 x i16>* %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i64(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i64(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i64(
- <vscale x 2 x i16>*,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i64(
- <vscale x 2 x i16>* %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i64(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i64(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i64(
- <vscale x 4 x i16>*,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i64(
- <vscale x 4 x i16>* %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i64(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i64(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i64(
- <vscale x 8 x i16>*,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i64(
- <vscale x 8 x i16>* %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i64(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i64(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i64(
- <vscale x 1 x i32>*,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i64(
- <vscale x 1 x i32>* %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i64(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i64(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i64(
- <vscale x 2 x i32>*,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i64(
- <vscale x 2 x i32>* %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i64(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i64(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i64(
- <vscale x 4 x i32>*,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i64(
- <vscale x 4 x i32>* %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i64(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i64(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i64(
- <vscale x 8 x i32>*,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i64(
- <vscale x 8 x i32>* %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i64(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i64(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i64(
- <vscale x 1 x i64>*,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i64(
- <vscale x 1 x i64>* %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i64(
- <vscale x 2 x i64>*,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i64(
- <vscale x 2 x i64>* %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i64(
- <vscale x 4 x i64>*,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i64(
- <vscale x 4 x i64>* %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i64(
- <vscale x 8 x i64>*,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i64(
- <vscale x 8 x i64>* %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i64(
- <vscale x 1 x half>*,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i64(
- <vscale x 1 x half>* %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i64(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i64(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i64(
- <vscale x 2 x half>*,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i64(
- <vscale x 2 x half>* %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i64(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i64(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i64(
- <vscale x 4 x half>*,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i64(
- <vscale x 4 x half>* %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i64(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i64(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i64(
- <vscale x 8 x half>*,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i64(
- <vscale x 8 x half>* %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i64(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i64(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i64(
- <vscale x 1 x float>*,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i64(
- <vscale x 1 x float>* %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i64(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i64(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i64(
- <vscale x 2 x float>*,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i64(
- <vscale x 2 x float>* %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i64(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i64(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i64(
- <vscale x 4 x float>*,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i64(
- <vscale x 4 x float>* %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i64(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i64(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i64(
- <vscale x 8 x float>*,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i64(
- <vscale x 8 x float>* %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i64(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i64(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i64(
- <vscale x 1 x double>*,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x double> @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double>* %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i64(
- <vscale x 1 x double>* %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i64(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i64(
- <vscale x 2 x double>*,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x double> @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double>* %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i64(
- <vscale x 2 x double>* %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i64(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i64(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i64(
- <vscale x 4 x double>*,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x double> @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double>* %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i64(
- <vscale x 4 x double>* %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i64(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i64(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i64(
- <vscale x 8 x double>*,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x double> @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double>* %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i64(
- <vscale x 8 x double>* %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i64(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
-; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i64(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8>* %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8>* %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8>* %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8>* %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8>* %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16>* %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16>* %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16>* %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16>* %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16>* %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32>* %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32>* %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32>* %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32>* %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32>* %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i32(
- <vscale x 1 x i64>*,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i32(
- <vscale x 1 x i64>* %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i32(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i32(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i32(
- <vscale x 2 x i64>*,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i32(
- <vscale x 2 x i64>* %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i32(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i32(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i32(
- <vscale x 4 x i64>*,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i32(
- <vscale x 4 x i64>* %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i32(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i32(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i32(
- <vscale x 8 x i64>*,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i32(
- <vscale x 8 x i64>* %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i32(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i32(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i32(
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i32(
- <vscale x 1 x half>* %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i32(
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i32(
- <vscale x 2 x half>* %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i32(
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i32(
- <vscale x 4 x half>* %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i32(
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i32(
- <vscale x 8 x half>* %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i32(
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i32(
- <vscale x 16 x half>* %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i32(
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i32(
- <vscale x 1 x float>* %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i32(
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i32(
- <vscale x 2 x float>* %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i32(
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i32(
- <vscale x 4 x float>* %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i32(
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i32(
- <vscale x 8 x float>* %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i32(
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i32(
- <vscale x 16 x float>* %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i32(
- <vscale x 1 x double>*,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x double> @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i32(
- <vscale x 1 x double>* %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i32(
- <vscale x 2 x double>*,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x double> @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i32(
- <vscale x 2 x double>* %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i32(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i32(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i32(
- <vscale x 4 x double>*,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x double> @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i32(
- <vscale x 4 x double>* %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i32(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i32(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i32(
- <vscale x 8 x double>*,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x double> @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i32(
- <vscale x 8 x double>* %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i32(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
-; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i32(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8>* %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8>* %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8>* %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8>* %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8>* %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8>* %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16>* %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16>* %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16>* %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16>* %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16>* %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16>* %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32>* %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32>* %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32>* %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32>* %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32>* %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i16(
- <vscale x 1 x i64>*,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i16(
- <vscale x 1 x i64>* %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i16(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i16(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i16(
- <vscale x 2 x i64>*,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i16(
- <vscale x 2 x i64>* %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i16(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i16(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i16(
- <vscale x 4 x i64>*,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i16(
- <vscale x 4 x i64>* %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i16(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i16(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i16(
- <vscale x 8 x i64>*,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i16(
- <vscale x 8 x i64>* %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i16(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i16(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i16(
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i16(
- <vscale x 1 x half>* %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i16(
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i16(
- <vscale x 2 x half>* %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i16(
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i16(
- <vscale x 4 x half>* %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i16(
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i16(
- <vscale x 8 x half>* %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i16(
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i16(
- <vscale x 16 x half>* %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i16(
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x half> @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i16(
- <vscale x 32 x half>* %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i16(
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i16(
- <vscale x 1 x float>* %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i16(
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i16(
- <vscale x 2 x float>* %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i16(
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i16(
- <vscale x 4 x float>* %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i16(
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i16(
- <vscale x 8 x float>* %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i16(
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i16(
- <vscale x 16 x float>* %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i16(
- <vscale x 1 x double>*,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x double> @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i16(
- <vscale x 1 x double>* %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i16(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i16(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i16(
- <vscale x 2 x double>*,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x double> @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i16(
- <vscale x 2 x double>* %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i16(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i16(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i16(
- <vscale x 4 x double>*,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x double> @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i16(
- <vscale x 4 x double>* %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i16(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i16(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i16(
- <vscale x 8 x double>*,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x double> @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i16(
- <vscale x 8 x double>* %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i16(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
-; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i16(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8>* %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8>* %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i8> @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8>* %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i8> @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8>* %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i8> @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8>* %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i8> @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8>* %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x i8> @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vlxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 64 x i8> @llvm.riscv.vlxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8>* %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 64 x i8> @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16>* %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i16> @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16>* %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i16> @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16>* %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i16> @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16>* %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i16> @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16>* %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i16> @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16>* %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x i16> @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32>* %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i32> @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32>* %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i32> @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32>* %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i32> @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32>* %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i32> @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32>* %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x i32> @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i8(
- <vscale x 1 x i64>*,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.nxv1i64.nxv1i8(
- <vscale x 1 x i64>* %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i8(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x i64> @llvm.riscv.vlxe.mask.nxv1i64.nxv1i8(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i8(
- <vscale x 2 x i64>*,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.nxv2i64.nxv2i8(
- <vscale x 2 x i64>* %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i8(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x i64> @llvm.riscv.vlxe.mask.nxv2i64.nxv2i8(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i8(
- <vscale x 4 x i64>*,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.nxv4i64.nxv4i8(
- <vscale x 4 x i64>* %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i8(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x i64> @llvm.riscv.vlxe.mask.nxv4i64.nxv4i8(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i8(
- <vscale x 8 x i64>*,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.nxv8i64.nxv8i8(
- <vscale x 8 x i64>* %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i8(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x i64> @llvm.riscv.vlxe.mask.nxv8i64.nxv8i8(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i8(
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x half> @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.nxv1f16.nxv1i8(
- <vscale x 1 x half>* %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x half> @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x half> @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i8(
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x half> @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.nxv2f16.nxv2i8(
- <vscale x 2 x half>* %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x half> @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x half> @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i8(
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x half> @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.nxv4f16.nxv4i8(
- <vscale x 4 x half>* %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x half> @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x half> @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i8(
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x half> @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.nxv8f16.nxv8i8(
- <vscale x 8 x half>* %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x half> @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x half> @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i8(
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x half> @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.nxv16f16.nxv16i8(
- <vscale x 16 x half>* %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x half> @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x half> @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i8(
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x half> @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 32 x half> @llvm.riscv.vlxe.nxv32f16.nxv32i8(
- <vscale x 32 x half>* %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x half> @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 32 x half> @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i8(
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x float> @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.nxv1f32.nxv1i8(
- <vscale x 1 x float>* %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x float> @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x float> @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i8(
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x float> @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.nxv2f32.nxv2i8(
- <vscale x 2 x float>* %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x float> @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x float> @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i8(
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x float> @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.nxv4f32.nxv4i8(
- <vscale x 4 x float>* %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x float> @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x float> @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i8(
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x float> @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.nxv8f32.nxv8i8(
- <vscale x 8 x float>* %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x float> @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i8(
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x float> @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.nxv16f32.nxv16i8(
- <vscale x 16 x float>* %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x float> @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 16 x float> @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i8(
- <vscale x 1 x double>*,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x double> @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 1 x double> @llvm.riscv.vlxe.nxv1f64.nxv1i8(
- <vscale x 1 x double>* %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i8(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i8(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i8(
- <vscale x 2 x double>*,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x double> @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 2 x double> @llvm.riscv.vlxe.nxv2f64.nxv2i8(
- <vscale x 2 x double>* %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i8(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x double> @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 2 x double> @llvm.riscv.vlxe.mask.nxv2f64.nxv2i8(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i8(
- <vscale x 4 x double>*,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x double> @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 4 x double> @llvm.riscv.vlxe.nxv4f64.nxv4i8(
- <vscale x 4 x double>* %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i8(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x double> @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 4 x double> @llvm.riscv.vlxe.mask.nxv4f64.nxv4i8(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i8(
- <vscale x 8 x double>*,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x double> @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- %a = call <vscale x 8 x double> @llvm.riscv.vlxe.nxv8f64.nxv8i8(
- <vscale x 8 x double>* %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i8(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x double> @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
-; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i8(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret <vscale x 8 x double> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
new file mode 100644
index 000000000000..dfa8171217a1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
@@ -0,0 +1,4398 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
new file mode 100644
index 000000000000..0b69e7691814
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
@@ -0,0 +1,6278 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei32.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei16.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsoxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll
deleted file mode 100644
index f4d4fc7fe621..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll
+++ /dev/null
@@ -1,3445 +0,0 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f -verify-machineinstrs \
-; RUN: --riscv-no-aliases < %s | FileCheck %s
-declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i32(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f16.nxv1i32(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i32(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f16.nxv2i32(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i32(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f16.nxv4i32(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i32(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f16.nxv8i32(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i32(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f16.nxv16i32(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f32.nxv1i32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f32.nxv2i32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f32.nxv4i32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f32.nxv8i32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f32.nxv16i32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f16.nxv1i16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f16.nxv2i16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f16.nxv4i16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f16.nxv8i16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f16.nxv16i16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32f16.nxv32i16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i16(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f32.nxv1i16(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i16(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f32.nxv2i16(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i16(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f32.nxv4i16(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i16(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f32.nxv8i16(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i16(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f32.nxv16i16(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i8(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f16.nxv1i8(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i8(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f16.nxv2i8(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i8(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f16.nxv4i8(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i8(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f16.nxv8i8(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i8(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f16.nxv16i8(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i8(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32f16.nxv32i8(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i8(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f32.nxv1i8(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i8(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f32.nxv2i8(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i8(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f32.nxv4i8(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- i32);
-
-define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll
deleted file mode 100644
index 2a766d5d2f35..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll
+++ /dev/null
@@ -1,5629 +0,0 @@
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN: --riscv-no-aliases < %s | FileCheck %s
-declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i64(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i8.nxv1i64(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i64(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i64(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i64(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i8.nxv2i64(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i64(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i64(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i64(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i8.nxv4i64(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i64(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i64(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i64(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i8.nxv8i64(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i64(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i64(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i64(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i16.nxv1i64(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i64(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i64(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i64(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i16.nxv2i64(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i64(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i64(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i64(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i16.nxv4i64(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i64(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i64(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i64(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i16.nxv8i64(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i64(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i64(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i64(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i32.nxv1i64(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i64(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i64(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i64(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i32.nxv2i64(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i64(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i64(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i64(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i32.nxv4i64(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i64(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i64(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i64(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i32.nxv8i64(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i64(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i64(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i64(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f16.nxv1i64(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i64(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i64(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i64(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f16.nxv2i64(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i64(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i64(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i64(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f16.nxv4i64(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i64(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i64(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i64(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f16.nxv8i64(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i64(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i64(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i64(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f32.nxv1i64(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i64(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i64(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i64(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f32.nxv2i64(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i64(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i64(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i64(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f32.nxv4i64(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i64(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i64(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i64(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f32.nxv8i64(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i64(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i64(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i64(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f64.nxv1i64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i64(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i64(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f64.nxv2i64(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i64(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i64(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i64(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f64.nxv4i64(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i64(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i64(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i64(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f64.nxv8i64(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i64(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i64(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i32(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i64.nxv1i32(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i32(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i32(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i32(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i64.nxv2i32(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i32(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i32(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i32(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i64.nxv4i32(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i32(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i32(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i32(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i64.nxv8i32(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i32(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i32(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i32(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f16.nxv1i32(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i32(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f16.nxv2i32(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i32(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f16.nxv4i32(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i32(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f16.nxv8i32(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i32(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f16.nxv16i32(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f32.nxv1i32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f32.nxv2i32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f32.nxv4i32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f32.nxv8i32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f32.nxv16i32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i32(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f64.nxv1i32(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i32(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i32(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i32(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f64.nxv2i32(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i32(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i32(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i32(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f64.nxv4i32(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i32(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i32(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i32(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f64.nxv8i32(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i32(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i32(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i16(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i64.nxv1i16(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i16(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i16(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i16(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i64.nxv2i16(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i16(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i16(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i16(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i64.nxv4i16(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i16(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i16(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i16(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i64.nxv8i16(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i16(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i16(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f16.nxv1i16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f16.nxv2i16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f16.nxv4i16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f16.nxv8i16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f16.nxv16i16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32f16.nxv32i16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i16(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f32.nxv1i16(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i16(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f32.nxv2i16(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i16(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f32.nxv4i16(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i16(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f32.nxv8i16(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i16(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f32.nxv16i16(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i16(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f64.nxv1i16(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i16(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i16(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i16(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f64.nxv2i16(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i16(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i16(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i16(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f64.nxv4i16(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i16(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i16(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i16(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f64.nxv8i16(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i16(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i16(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i8(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1i64.nxv1i8(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i8(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i8(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i8(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2i64.nxv2i8(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i8(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i8(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i8(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4i64.nxv4i8(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i8(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i8(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i8(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8i64.nxv8i8(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i8(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i8(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i8(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f16.nxv1i8(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i8(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f16.nxv2i8(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i8(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f16.nxv4i8(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i8(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f16.nxv8i8(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i8(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f16.nxv16i8(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i8(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv32f16.nxv32i8(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i8(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f32.nxv1i8(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i8(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f32.nxv2i8(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i8(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f32.nxv4i8(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i8(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv1f64.nxv1i8(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i8(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i8(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i8(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv2f64.nxv2i8(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i8(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i8(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i8(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv4f64.nxv4i8(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i8(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i8(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i8(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsuxe.nxv8f64.nxv8i8(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i8(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i8(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
new file mode 100644
index 000000000000..494d241076b9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
@@ -0,0 +1,4398 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ i32);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
new file mode 100644
index 000000000000..41ace54ebb21
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
@@ -0,0 +1,6278 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i64> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei64.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei32.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m2,ta,mu
+; CHECK-NEXT: vle16.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei16.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>*,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8>* %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>*,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half>* %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>*,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m2,ta,mu
+; CHECK-NEXT: vle8.v v26, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v26, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float>* %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>*,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double>* %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>*,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double>* %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>*,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double>* %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ i64);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>*,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m1,ta,mu
+; CHECK-NEXT: vle8.v v25, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsuxei8.v v16, (a0), v25, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double>* %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll
deleted file mode 100644
index 7795b72f499b..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll
+++ /dev/null
@@ -1,3445 +0,0 @@
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f -verify-machineinstrs \
-; RUN: --riscv-no-aliases < %s | FileCheck %s
-declare void @llvm.riscv.vsxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f16.nxv1i32(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f16.nxv1i32(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f16.nxv2i32(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f16.nxv2i32(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f16.nxv4i32(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f16.nxv4i32(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f16.nxv8i32(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f16.nxv8i32(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f16.nxv16i32(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f16.nxv16i32(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f32.nxv1i32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f32.nxv1i32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f32.nxv2i32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f32.nxv2i32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f32.nxv4i32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f32.nxv4i32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f32.nxv8i32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f32.nxv8i32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f32.nxv16i32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f32.nxv16i32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i32> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f16.nxv1i16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f16.nxv1i16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f16.nxv2i16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f16.nxv2i16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f16.nxv4i16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f16.nxv4i16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f16.nxv8i16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f16.nxv8i16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f16.nxv16i16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f16.nxv16i16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32f16.nxv32i16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32f16.nxv32i16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f32.nxv1i16(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f32.nxv1i16(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f32.nxv2i16(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f32.nxv2i16(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f32.nxv4i16(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f32.nxv4i16(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f32.nxv8i16(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f32.nxv8i16(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f32.nxv16i16(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f32.nxv16i16(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i16> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f16.nxv1i8(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f16.nxv1i8(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f16.nxv2i8(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f16.nxv2i8(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f16.nxv4i8(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f16.nxv4i8(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f16.nxv8i8(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f16.nxv8i8(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f16.nxv16i8(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f16.nxv16i8(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32f16.nxv32i8(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32f16.nxv32i8(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f32.nxv1i8(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f32.nxv1i8(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f32.nxv2i8(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f32.nxv2i8(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f32.nxv4i8(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f32.nxv4i8(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f32.nxv8i8(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f32.nxv8i8(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f32.nxv16i8(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- i32);
-
-define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f32.nxv16i8(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i8> %2,
- i32 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32);
-
-define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4)
-
- ret void
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll
deleted file mode 100644
index d7d7b0d319b0..000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll
+++ /dev/null
@@ -1,5629 +0,0 @@
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
-; RUN: --riscv-no-aliases < %s | FileCheck %s
-declare void @llvm.riscv.vsxe.nxv1i8.nxv1i64(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i8.nxv1i64(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i64(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i64(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i8.nxv2i64(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i8.nxv2i64(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i64(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i64(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i8.nxv4i64(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i8.nxv4i64(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i64(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i64(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i8.nxv8i64(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i8.nxv8i64(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i64(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i64(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i16.nxv1i64(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i16.nxv1i64(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i64(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i64(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i16.nxv2i64(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i16.nxv2i64(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i64(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i64(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i16.nxv4i64(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i16.nxv4i64(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i64(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i64(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i16.nxv8i64(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i16.nxv8i64(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i64(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i64(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i32.nxv1i64(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i32.nxv1i64(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i64(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i64(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i32.nxv2i64(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i32.nxv2i64(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i64(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i64(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i32.nxv4i64(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i32.nxv4i64(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i64(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i64(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i32.nxv8i64(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i32.nxv8i64(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i64(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i64(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f16.nxv1i64(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f16.nxv1i64(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i64(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i64(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f16.nxv2i64(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f16.nxv2i64(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i64(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i64(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f16.nxv4i64(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f16.nxv4i64(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i64(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i64(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f16.nxv8i64(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f16.nxv8i64(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i64(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i64(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f32.nxv1i64(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f32.nxv1i64(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i64(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i64(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f32.nxv2i64(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f32.nxv2i64(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i64(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i64(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f32.nxv4i64(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f32.nxv4i64(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i64(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i64(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f32.nxv8i64(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f32.nxv8i64(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i64(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i64(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f64.nxv1i64(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f64.nxv1i64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i64(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i64(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f64.nxv2i64(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f64.nxv2i64(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i64(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i64(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f64.nxv4i64(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f64.nxv4i64(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i64(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i64(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f64.nxv8i64(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i64>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f64.nxv8i64(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i64> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i64(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i64(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i8.nxv1i32(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i8.nxv2i32(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i8.nxv4i32(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i8.nxv8i32(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i8.nxv16i32(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i16.nxv1i32(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i16.nxv2i32(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i16.nxv4i32(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i16.nxv8i32(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i16.nxv16i32(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i64.nxv1i32(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i64.nxv1i32(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i32(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i32(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i64.nxv2i32(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i64.nxv2i32(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i32(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i32(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i64.nxv4i32(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i64.nxv4i32(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i32(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i32(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i64.nxv8i32(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i64.nxv8i32(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i32(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i32(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f16.nxv1i32(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f16.nxv1i32(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f16.nxv2i32(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f16.nxv2i32(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f16.nxv4i32(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f16.nxv4i32(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f16.nxv8i32(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f16.nxv8i32(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f16.nxv16i32(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f16.nxv16i32(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f32.nxv1i32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f32.nxv1i32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f32.nxv2i32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f32.nxv2i32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f32.nxv4i32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f32.nxv4i32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f32.nxv8i32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f32.nxv8i32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f32.nxv16i32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f32.nxv16i32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f64.nxv1i32(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f64.nxv1i32(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i32(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i32(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f64.nxv2i32(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f64.nxv2i32(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i32(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i32(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f64.nxv4i32(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f64.nxv4i32(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i32(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i32(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f64.nxv8i32(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i32>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f64.nxv8i32(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i32> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i32(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i32(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i8.nxv1i16(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i8.nxv2i16(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i8.nxv4i16(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i8.nxv8i16(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i8.nxv16i16(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32i8.nxv32i16(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i32.nxv2i16(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i32.nxv4i16(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i32.nxv8i16(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i32.nxv16i16(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i64.nxv1i16(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i64.nxv1i16(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i16(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i16(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i64.nxv2i16(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i64.nxv2i16(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i16(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i16(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i64.nxv4i16(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i64.nxv4i16(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i16(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i16(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i64.nxv8i16(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i64.nxv8i16(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i16(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i16(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f16.nxv1i16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f16.nxv1i16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f16.nxv2i16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f16.nxv2i16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f16.nxv4i16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f16.nxv4i16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f16.nxv8i16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f16.nxv8i16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f16.nxv16i16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f16.nxv16i16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32f16.nxv32i16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32f16.nxv32i16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f32.nxv1i16(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f32.nxv1i16(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f32.nxv2i16(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f32.nxv2i16(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f32.nxv4i16(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f32.nxv4i16(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f32.nxv8i16(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f32.nxv8i16(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f32.nxv16i16(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f32.nxv16i16(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f64.nxv1i16(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f64.nxv1i16(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i16(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i16(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f64.nxv2i16(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f64.nxv2i16(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i16(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i16(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f64.nxv4i16(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f64.nxv4i16(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i16(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i16(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f64.nxv8i16(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i16>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f64.nxv8i16(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i16> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i16(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i16(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>*,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8>* %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i16.nxv1i8(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i16.nxv2i8(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i16.nxv4i8(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i16.nxv8i8(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i8(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i16.nxv16i8(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32i16.nxv32i8(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i32.nxv1i8(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i32.nxv2i8(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i8(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i32.nxv4i8(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i32.nxv8i8(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16i32.nxv16i8(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1i64.nxv1i8(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1i64.nxv1i8(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i8(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i8(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2i64.nxv2i8(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2i64.nxv2i8(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i8(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i8(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4i64.nxv4i8(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4i64.nxv4i8(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i8(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i8(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8i64.nxv8i8(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8i64.nxv8i8(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i8(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i8(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f16.nxv1i8(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f16.nxv1i8(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half>,
- <vscale x 1 x half>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8(
- <vscale x 1 x half> %0,
- <vscale x 1 x half>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f16.nxv2i8(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f16.nxv2i8(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half>,
- <vscale x 2 x half>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8(
- <vscale x 2 x half> %0,
- <vscale x 2 x half>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f16.nxv4i8(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f16.nxv4i8(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half>,
- <vscale x 4 x half>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8(
- <vscale x 4 x half> %0,
- <vscale x 4 x half>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f16.nxv8i8(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f16.nxv8i8(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half>,
- <vscale x 8 x half>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8(
- <vscale x 8 x half> %0,
- <vscale x 8 x half>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f16.nxv16i8(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f16.nxv16i8(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half>,
- <vscale x 16 x half>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8(
- <vscale x 16 x half> %0,
- <vscale x 16 x half>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv32f16.nxv32i8(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv32f16.nxv32i8(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half>,
- <vscale x 32 x half>*,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8(
- <vscale x 32 x half> %0,
- <vscale x 32 x half>* %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f32.nxv1i8(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f32.nxv1i8(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float>,
- <vscale x 1 x float>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8(
- <vscale x 1 x float> %0,
- <vscale x 1 x float>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f32.nxv2i8(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f32.nxv2i8(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float>,
- <vscale x 2 x float>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8(
- <vscale x 2 x float> %0,
- <vscale x 2 x float>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f32.nxv4i8(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f32.nxv4i8(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float>,
- <vscale x 4 x float>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8(
- <vscale x 4 x float> %0,
- <vscale x 4 x float>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f32.nxv8i8(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f32.nxv8i8(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float>,
- <vscale x 8 x float>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8(
- <vscale x 8 x float> %0,
- <vscale x 8 x float>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv16f32.nxv16i8(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv16f32.nxv16i8(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float>,
- <vscale x 16 x float>*,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i8(
- <vscale x 16 x float> %0,
- <vscale x 16 x float>* %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv1f64.nxv1i8(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv1f64.nxv1i8(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i8(
- <vscale x 1 x double>,
- <vscale x 1 x double>*,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i8(
- <vscale x 1 x double> %0,
- <vscale x 1 x double>* %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv2f64.nxv2i8(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv2f64.nxv2i8(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i8(
- <vscale x 2 x double>,
- <vscale x 2 x double>*,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i8(
- <vscale x 2 x double> %0,
- <vscale x 2 x double>* %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv4f64.nxv4i8(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv4f64.nxv4i8(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i8(
- <vscale x 4 x double>,
- <vscale x 4 x double>*,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i8(
- <vscale x 4 x double> %0,
- <vscale x 4 x double>* %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.nxv8f64.nxv8i8(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i8>,
- i64);
-
-define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
- call void @llvm.riscv.vsxe.nxv8f64.nxv8i8(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i8> %2,
- i64 %3)
-
- ret void
-}
-
-declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i8(
- <vscale x 8 x double>,
- <vscale x 8 x double>*,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64);
-
-define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-entry:
-; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
- call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i8(
- <vscale x 8 x double> %0,
- <vscale x 8 x double>* %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4)
-
- ret void
-}
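
(The llvm.riscv.vsxe store tests deleted above exercised the old combined indexed-store mnemonic; under the v1.0 naming the indexed stores are split into ordered and unordered forms, as the store.s changes further down show. A minimal assembler-level sketch of the two replacement spellings, with operands left exactly as in the deleted CHECK lines; the precise encodings appear in the store.s hunk below:

    vsoxei8.v v24, (a0), v4, v0.t    # ordered-indexed store, masked
    vsuxei8.v v24, (a0), v4, v0.t    # unordered-indexed store, masked

This is illustrative only and is not taken from the added codegen test files.)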
diff --git a/llvm/test/MC/RISCV/rvv/aliases.s b/llvm/test/MC/RISCV/rvv/aliases.s
index 7f937dcfcfd9..2e5120c91e45 100644
--- a/llvm/test/MC/RISCV/rvv/aliases.s
+++ b/llvm/test/MC/RISCV/rvv/aliases.s
@@ -54,6 +54,18 @@ vmset.m v0
# ALIAS: vmnot.m v0, v1 # encoding: [0x57,0xa0,0x10,0x76]
# NO-ALIAS: vmnand.mm v0, v1, v1 # encoding: [0x57,0xa0,0x10,0x76]
vmnot.m v0, v1
+# ALIAS: vl1r.v v0, (a0) # encoding: [0x07,0x00,0x85,0x22]
+# NO-ALIAS: vl1re8.v v0, (a0) # encoding: [0x07,0x00,0x85,0x22]
+vl1r.v v0, (a0)
+# ALIAS: vl2r.v v0, (a0) # encoding: [0x07,0x00,0x85,0x42]
+# NO-ALIAS: vl2re8.v v0, (a0) # encoding: [0x07,0x00,0x85,0x42]
+vl2r.v v0, (a0)
+# ALIAS: vl4r.v v0, (a0) # encoding: [0x07,0x00,0x85,0x82]
+# NO-ALIAS: vl4re8.v v0, (a0) # encoding: [0x07,0x00,0x85,0x82]
+vl4r.v v0, (a0)
+# ALIAS: vl8r.v v0, (a0) # encoding: [0x07,0x00,0x85,0x02]
+# NO-ALIAS: vl8re8.v v0, (a0) # encoding: [0x07,0x00,0x85,0x02]
+vl8r.v v0, (a0)
# ALIAS: vneg.v v2, v1, v0.t # encoding: [0x57,0x41,0x10,0x0c]
# NO-ALIAS: vrsub.vx v2, v1, zero, v0.t # encoding: [0x57,0x41,0x10,0x0c]
vneg.v v2, v1, v0.t
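
(The added ALIAS/NO-ALIAS checks above pair each short whole-register load spelling, vl1r.v through vl8r.v, with the EEW-qualified mnemonic it now expands to; the encodings on both lines of each pair are identical, so only the printed form differs. A minimal sketch of the equivalence, assuming the default alias-printing output of llvm-objdump:

    vl1r.v   v0, (a0)    # accepted spelling; encodes identically to ...
    vl1re8.v v0, (a0)    # ... the canonical EEW=8 whole-register load
)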
diff --git a/llvm/test/MC/RISCV/rvv/load.s b/llvm/test/MC/RISCV/rvv/load.s
index eac3191c2d6b..4841f5757e76 100644
--- a/llvm/test/MC/RISCV/rvv/load.s
+++ b/llvm/test/MC/RISCV/rvv/load.s
@@ -1,12 +1,12 @@
# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: --riscv-no-aliases | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
-# RUN: | llvm-objdump -d --mattr=+experimental-v - \
-# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: | llvm-objdump -d --mattr=+experimental-v - --riscv-no-aliases \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vle8.v v8, (a0), v0.t
# CHECK-INST: vle8.v v8, (a0), v0.t
@@ -296,104 +296,290 @@ vlse1024.v v8, (a0), a1
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 07 74 b5 1a <unknown>
-vlxei8.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxei8.v v8, (a0), v4, v0.t
+vluxei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0x04]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 04 45 04 <unknown>
+
+vluxei8.v v8, (a0), v4
+# CHECK-INST: vluxei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0x06]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 04 45 06 <unknown>
+
+vluxei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0x04]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 54 45 04 <unknown>
+
+vluxei16.v v8, (a0), v4
+# CHECK-INST: vluxei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0x06]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 54 45 06 <unknown>
+
+vluxei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0x04]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 64 45 04 <unknown>
+
+vluxei32.v v8, (a0), v4
+# CHECK-INST: vluxei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0x06]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 64 45 06 <unknown>
+
+vluxei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0x04]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 74 45 04 <unknown>
+
+vluxei64.v v8, (a0), v4
+# CHECK-INST: vluxei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0x06]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 74 45 06 <unknown>
+
+vloxei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxei8.v v8, (a0), v4, v0.t
# CHECK-ENCODING: [0x07,0x04,0x45,0x0c]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 07 04 45 0c <unknown>
-vlxei8.v v8, (a0), v4
-# CHECK-INST: vlxei8.v v8, (a0), v4
+vloxei8.v v8, (a0), v4
+# CHECK-INST: vloxei8.v v8, (a0), v4
# CHECK-ENCODING: [0x07,0x04,0x45,0x0e]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 07 04 45 0e <unknown>
-vlxei16.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxei16.v v8, (a0), v4, v0.t
+vloxei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxei16.v v8, (a0), v4, v0.t
# CHECK-ENCODING: [0x07,0x54,0x45,0x0c]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 07 54 45 0c <unknown>
-vlxei16.v v8, (a0), v4
-# CHECK-INST: vlxei16.v v8, (a0), v4
+vloxei16.v v8, (a0), v4
+# CHECK-INST: vloxei16.v v8, (a0), v4
# CHECK-ENCODING: [0x07,0x54,0x45,0x0e]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 07 54 45 0e <unknown>
-vlxei32.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxei32.v v8, (a0), v4, v0.t
+vloxei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxei32.v v8, (a0), v4, v0.t
# CHECK-ENCODING: [0x07,0x64,0x45,0x0c]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 07 64 45 0c <unknown>
-vlxei32.v v8, (a0), v4
-# CHECK-INST: vlxei32.v v8, (a0), v4
+vloxei32.v v8, (a0), v4
+# CHECK-INST: vloxei32.v v8, (a0), v4
# CHECK-ENCODING: [0x07,0x64,0x45,0x0e]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 07 64 45 0e <unknown>
-vlxei64.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxei64.v v8, (a0), v4, v0.t
+vloxei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxei64.v v8, (a0), v4, v0.t
# CHECK-ENCODING: [0x07,0x74,0x45,0x0c]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 07 74 45 0c <unknown>
-vlxei64.v v8, (a0), v4
-# CHECK-INST: vlxei64.v v8, (a0), v4
+vloxei64.v v8, (a0), v4
+# CHECK-INST: vloxei64.v v8, (a0), v4
# CHECK-ENCODING: [0x07,0x74,0x45,0x0e]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 07 74 45 0e <unknown>
-vlxei128.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxei128.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0x1c]
+vl1re8.v v8, (a0)
+# CHECK-INST: vl1re8.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x85,0x22]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 04 85 22 <unknown>
+
+vl1re16.v v8, (a0)
+# CHECK-INST: vl1re16.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x85,0x22]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 54 85 22 <unknown>
+
+vl1re32.v v8, (a0)
+# CHECK-INST: vl1re32.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x85,0x22]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 64 85 22 <unknown>
+
+vl1re64.v v8, (a0)
+# CHECK-INST: vl1re64.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x85,0x22]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 74 85 22 <unknown>
+
+vl1re128.v v8, (a0)
+# CHECK-INST: vl1re128.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x85,0x32]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 04 85 32 <unknown>
+
+vl1re256.v v8, (a0)
+# CHECK-INST: vl1re256.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x85,0x32]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 54 85 32 <unknown>
+
+vl1re512.v v8, (a0)
+# CHECK-INST: vl1re512.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x85,0x32]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 64 85 32 <unknown>
+
+vl1re1024.v v8, (a0)
+# CHECK-INST: vl1re1024.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x85,0x32]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 74 85 32 <unknown>
+
+vl2re8.v v8, (a0)
+# CHECK-INST: vl2re8.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x85,0x42]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 04 85 42 <unknown>
+
+vl2re16.v v8, (a0)
+# CHECK-INST: vl2re16.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x85,0x42]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 54 85 42 <unknown>
+
+vl2re32.v v8, (a0)
+# CHECK-INST: vl2re32.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x85,0x42]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 64 85 42 <unknown>
+
+vl2re64.v v8, (a0)
+# CHECK-INST: vl2re64.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x85,0x42]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 74 85 42 <unknown>
+
+vl2re128.v v8, (a0)
+# CHECK-INST: vl2re128.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x85,0x52]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 04 85 52 <unknown>
+
+vl2re256.v v8, (a0)
+# CHECK-INST: vl2re256.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x85,0x52]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 54 85 52 <unknown>
+
+vl2re512.v v8, (a0)
+# CHECK-INST: vl2re512.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x85,0x52]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 64 85 52 <unknown>
+
+vl2re1024.v v8, (a0)
+# CHECK-INST: vl2re1024.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x85,0x52]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 07 04 45 1c <unknown>
+# CHECK-UNKNOWN: 07 74 85 52 <unknown>
-vlxei128.v v8, (a0), v4
-# CHECK-INST: vlxei128.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0x1e]
+vl4re8.v v8, (a0)
+# CHECK-INST: vl4re8.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x85,0x82]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 07 04 45 1e <unknown>
+# CHECK-UNKNOWN: 07 04 85 82 <unknown>
-vlxei256.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxei256.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0x1c]
+vl4re16.v v8, (a0)
+# CHECK-INST: vl4re16.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x85,0x82]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 07 54 45 1c <unknown>
+# CHECK-UNKNOWN: 07 54 85 82 <unknown>
-vlxei256.v v8, (a0), v4
-# CHECK-INST: vlxei256.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0x1e]
+vl4re32.v v8, (a0)
+# CHECK-INST: vl4re32.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x85,0x82]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 07 54 45 1e <unknown>
+# CHECK-UNKNOWN: 07 64 85 82 <unknown>
-vlxei512.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxei512.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0x1c]
+vl4re64.v v8, (a0)
+# CHECK-INST: vl4re64.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x85,0x82]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 07 64 45 1c <unknown>
+# CHECK-UNKNOWN: 07 74 85 82 <unknown>
-vlxei512.v v8, (a0), v4
-# CHECK-INST: vlxei512.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0x1e]
+vl4re128.v v8, (a0)
+# CHECK-INST: vl4re128.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x85,0x92]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 07 64 45 1e <unknown>
+# CHECK-UNKNOWN: 07 04 85 92 <unknown>
-vlxei1024.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxei1024.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0x1c]
+vl4re256.v v8, (a0)
+# CHECK-INST: vl4re256.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x85,0x92]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 07 74 45 1c <unknown>
+# CHECK-UNKNOWN: 07 54 85 92 <unknown>
-vlxei1024.v v8, (a0), v4
-# CHECK-INST: vlxei1024.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0x1e]
+vl4re512.v v8, (a0)
+# CHECK-INST: vl4re512.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x85,0x92]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 07 74 45 1e <unknown>
+# CHECK-UNKNOWN: 07 64 85 92 <unknown>
-vl1r.v v8, (a0)
-# CHECK-INST: vl1r.v v8, (a0)
+vl4re1024.v v8, (a0)
+# CHECK-INST: vl4re1024.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x85,0x92]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 74 85 92 <unknown>
+
+vl8re8.v v8, (a0)
+# CHECK-INST: vl8re8.v v8, (a0)
# CHECK-ENCODING: [0x07,0x04,0x85,0x02]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 07 04 85 02 <unknown>
+
+vl8re16.v v8, (a0)
+# CHECK-INST: vl8re16.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x85,0x02]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 54 85 02 <unknown>
+
+vl8re32.v v8, (a0)
+# CHECK-INST: vl8re32.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x85,0x02]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 64 85 02 <unknown>
+
+vl8re64.v v8, (a0)
+# CHECK-INST: vl8re64.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x85,0x02]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 74 85 02 <unknown>
+
+vl8re128.v v8, (a0)
+# CHECK-INST: vl8re128.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x85,0x12]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 04 85 12 <unknown>
+
+vl8re256.v v8, (a0)
+# CHECK-INST: vl8re256.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x85,0x12]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 54 85 12 <unknown>
+
+vl8re512.v v8, (a0)
+# CHECK-INST: vl8re512.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x85,0x12]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 64 85 12 <unknown>
+
+vl8re1024.v v8, (a0)
+# CHECK-INST: vl8re1024.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x85,0x12]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 07 74 85 12 <unknown>
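
(The load.s hunk above replaces the single vlxei* family with unordered (vluxei*) and ordered (vloxei*) indexed loads, drops the index-EEW variants above 64, and adds the whole-register vl<nf>re<eew>.v loads. For the indexed pairs, the listed encodings differ only in the mop field, bits 27:26 of the instruction word; a small sketch reading two encodings from the hunk, noting that the byte lists are in little-endian memory order so the last byte carries bits 31:24:

    vluxei8.v v8, (a0), v4    # [0x07,0x04,0x45,0x06] -> mop = 01, unordered
    vloxei8.v v8, (a0), v4    # [0x07,0x04,0x45,0x0e] -> mop = 11, ordered
)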
diff --git a/llvm/test/MC/RISCV/rvv/store.s b/llvm/test/MC/RISCV/rvv/store.s
index 75dacb9d19e5..8437bf7f9030 100644
--- a/llvm/test/MC/RISCV/rvv/store.s
+++ b/llvm/test/MC/RISCV/rvv/store.s
@@ -1,12 +1,12 @@
# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: --riscv-no-aliases | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
-# RUN: | llvm-objdump -d --mattr=+experimental-v - \
-# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: | llvm-objdump -d --mattr=+experimental-v - --riscv-no-aliases \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v %s \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
vse8.v v24, (a0), v0.t
# CHECK-INST: vse8.v v24, (a0), v0.t
@@ -200,104 +200,122 @@ vsse1024.v v24, (a0), a1
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 27 7c b5 1a <unknown>
-vsxei8.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxei8.v v24, (a0), v4, v0.t
+vsuxei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x04]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 27 0c 45 04 <unknown>
+
+vsuxei8.v v24, (a0), v4
+# CHECK-INST: vsuxei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x06]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 27 0c 45 06 <unknown>
+
+vsuxei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x04]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 27 5c 45 04 <unknown>
+
+vsuxei16.v v24, (a0), v4
+# CHECK-INST: vsuxei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x06]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 27 5c 45 06 <unknown>
+
+vsuxei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x04]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 27 6c 45 04 <unknown>
+
+vsuxei32.v v24, (a0), v4
+# CHECK-INST: vsuxei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x06]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 27 6c 45 06 <unknown>
+
+vsuxei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x04]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 27 7c 45 04 <unknown>
+
+vsuxei64.v v24, (a0), v4
+# CHECK-INST: vsuxei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x06]
+# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
+# CHECK-UNKNOWN: 27 7c 45 06 <unknown>
+
+vsoxei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxei8.v v24, (a0), v4, v0.t
# CHECK-ENCODING: [0x27,0x0c,0x45,0x0c]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 27 0c 45 0c <unknown>
-vsxei8.v v24, (a0), v4
-# CHECK-INST: vsxei8.v v24, (a0), v4
+vsoxei8.v v24, (a0), v4
+# CHECK-INST: vsoxei8.v v24, (a0), v4
# CHECK-ENCODING: [0x27,0x0c,0x45,0x0e]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 27 0c 45 0e <unknown>
-vsxei16.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxei16.v v24, (a0), v4, v0.t
+vsoxei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxei16.v v24, (a0), v4, v0.t
# CHECK-ENCODING: [0x27,0x5c,0x45,0x0c]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 27 5c 45 0c <unknown>
-vsxei16.v v24, (a0), v4
-# CHECK-INST: vsxei16.v v24, (a0), v4
+vsoxei16.v v24, (a0), v4
+# CHECK-INST: vsoxei16.v v24, (a0), v4
# CHECK-ENCODING: [0x27,0x5c,0x45,0x0e]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 27 5c 45 0e <unknown>
-vsxei32.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxei32.v v24, (a0), v4, v0.t
+vsoxei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxei32.v v24, (a0), v4, v0.t
# CHECK-ENCODING: [0x27,0x6c,0x45,0x0c]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 27 6c 45 0c <unknown>
-vsxei32.v v24, (a0), v4
-# CHECK-INST: vsxei32.v v24, (a0), v4
+vsoxei32.v v24, (a0), v4
+# CHECK-INST: vsoxei32.v v24, (a0), v4
# CHECK-ENCODING: [0x27,0x6c,0x45,0x0e]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 27 6c 45 0e <unknown>
-vsxei64.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxei64.v v24, (a0), v4, v0.t
+vsoxei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxei64.v v24, (a0), v4, v0.t
# CHECK-ENCODING: [0x27,0x7c,0x45,0x0c]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 27 7c 45 0c <unknown>
-vsxei64.v v24, (a0), v4
-# CHECK-INST: vsxei64.v v24, (a0), v4
+vsoxei64.v v24, (a0), v4
+# CHECK-INST: vsoxei64.v v24, (a0), v4
# CHECK-ENCODING: [0x27,0x7c,0x45,0x0e]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 27 7c 45 0e <unknown>
-vsxei128.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxei128.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x1c]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 27 0c 45 1c <unknown>
-
-vsxei128.v v24, (a0), v4
-# CHECK-INST: vsxei128.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x1e]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 27 0c 45 1e <unknown>
-
-vsxei256.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxei256.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x1c]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 27 5c 45 1c <unknown>
-
-vsxei256.v v24, (a0), v4
-# CHECK-INST: vsxei256.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x1e]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 27 5c 45 1e <unknown>
-
-vsxei512.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxei512.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x1c]
-# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 27 6c 45 1c <unknown>
-
-vsxei512.v v24, (a0), v4
-# CHECK-INST: vsxei512.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x1e]
+vs1r.v v24, (a0)
+# CHECK-INST: vs1r.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x85,0x22]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 27 6c 45 1e <unknown>
+# CHECK-UNKNOWN: 27 0c 85 22 <unknown>
-vsxei1024.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxei1024.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x1c]
+vs2r.v v24, (a0)
+# CHECK-INST: vs2r.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x85,0x42]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 27 7c 45 1c <unknown>
+# CHECK-UNKNOWN: 27 0c 85 42 <unknown>
-vsxei1024.v v24, (a0), v4
-# CHECK-INST: vsxei1024.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x1e]
+vs4r.v v24, (a0)
+# CHECK-INST: vs4r.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x85,0x82]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
-# CHECK-UNKNOWN: 27 7c 45 1e <unknown>
+# CHECK-UNKNOWN: 27 0c 85 82 <unknown>
-vs1r.v v24, (a0)
-# CHECK-INST: vs1r.v v24, (a0)
+vs8r.v v24, (a0)
+# CHECK-INST: vs8r.v v24, (a0)
# CHECK-ENCODING: [0x27,0x0c,0x85,0x02]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 27 0c 85 02 <unknown>
diff --git a/llvm/test/MC/RISCV/rvv/zvlsseg.s b/llvm/test/MC/RISCV/rvv/zvlsseg.s
index c8794fd807b0..b41b6984bcb2 100644
--- a/llvm/test/MC/RISCV/rvv/zvlsseg.s
+++ b/llvm/test/MC/RISCV/rvv/zvlsseg.s
@@ -1,4713 +1,4717 @@
-# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-zvlsseg %s \
-# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -show-encoding --mattr=+experimental-v %s \
+# RUN: --mattr=+experimental-zvlsseg --riscv-no-aliases \
+# RUN: | FileCheck %s --check-prefixes=CHECK-ENCODING,CHECK-INST
# RUN: not llvm-mc -triple=riscv64 -show-encoding %s 2>&1 \
-# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-zvlsseg %s \
-# RUN: | llvm-objdump -d --mattr=+experimental-zvlsseg - \
-# RUN: | FileCheck %s --check-prefix=CHECK-INST
-# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-zvlsseg %s \
-# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+# RUN: | FileCheck %s --check-prefix=CHECK-ERROR
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v \
+# RUN: --mattr=+experimental-zvlsseg %s \
+# RUN: | llvm-objdump -d --mattr=+experimental-v --mattr=+experimental-zvlsseg \
+# RUN: --riscv-no-aliases - \
+# RUN: | FileCheck %s --check-prefix=CHECK-INST
+# RUN: llvm-mc -triple=riscv64 -filetype=obj --mattr=+experimental-v \
+# RUN: --mattr=+experimental-zvlsseg %s \
+# RUN: | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-UNKNOWN
+
+vlseg2e8.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e8.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x20]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 20 <unknown>
vlseg2e8.v v8, (a0)
# CHECK-INST: vlseg2e8.v v8, (a0)
# CHECK-ENCODING: [0x07,0x04,0x05,0x22]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 04 05 22 <unknown>
+vlseg2e16.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e16.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x20]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 20 <unknown>
+
vlseg2e16.v v8, (a0)
# CHECK-INST: vlseg2e16.v v8, (a0)
# CHECK-ENCODING: [0x07,0x54,0x05,0x22]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 54 05 22 <unknown>
+vlseg2e32.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e32.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x20]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 20 <unknown>
+
vlseg2e32.v v8, (a0)
# CHECK-INST: vlseg2e32.v v8, (a0)
# CHECK-ENCODING: [0x07,0x64,0x05,0x22]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 64 05 22 <unknown>
+vlseg2e64.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e64.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x20]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 20 <unknown>
+
vlseg2e64.v v8, (a0)
# CHECK-INST: vlseg2e64.v v8, (a0)
# CHECK-ENCODING: [0x07,0x74,0x05,0x22]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 74 05 22 <unknown>
+vlseg2e128.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e128.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x30]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 30 <unknown>
+
vlseg2e128.v v8, (a0)
# CHECK-INST: vlseg2e128.v v8, (a0)
# CHECK-ENCODING: [0x07,0x04,0x05,0x32]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 04 05 32 <unknown>
+vlseg2e256.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e256.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x30]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 30 <unknown>
+
vlseg2e256.v v8, (a0)
# CHECK-INST: vlseg2e256.v v8, (a0)
# CHECK-ENCODING: [0x07,0x54,0x05,0x32]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 54 05 32 <unknown>
+vlseg2e512.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e512.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x30]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 30 <unknown>
+
vlseg2e512.v v8, (a0)
# CHECK-INST: vlseg2e512.v v8, (a0)
# CHECK-ENCODING: [0x07,0x64,0x05,0x32]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 64 05 32 <unknown>
+vlseg2e1024.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e1024.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x30]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 30 <unknown>
+
vlseg2e1024.v v8, (a0)
# CHECK-INST: vlseg2e1024.v v8, (a0)
# CHECK-ENCODING: [0x07,0x74,0x05,0x32]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 74 05 32 <unknown>
-vlseg2e8.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e8.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x20]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 20 <unknown>
+vlseg2e8ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e8ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x21]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 21 <unknown>
-vlseg2e16.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e16.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x20]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 20 <unknown>
+vlseg2e8ff.v v8, (a0)
+# CHECK-INST: vlseg2e8ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x23]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 23 <unknown>
-vlseg2e32.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e32.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x20]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 20 <unknown>
+vlseg2e16ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e16ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x21]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 21 <unknown>
-vlseg2e64.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e64.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x20]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 20 <unknown>
+vlseg2e16ff.v v8, (a0)
+# CHECK-INST: vlseg2e16ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x23]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 23 <unknown>
-vlseg2e128.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e128.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x30]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 30 <unknown>
+vlseg2e32ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e32ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x21]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 21 <unknown>
-vlseg2e256.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e256.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x30]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 30 <unknown>
+vlseg2e32ff.v v8, (a0)
+# CHECK-INST: vlseg2e32ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x23]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 23 <unknown>
-vlseg2e512.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e512.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x30]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 30 <unknown>
+vlseg2e64ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e64ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x21]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 21 <unknown>
-vlseg2e1024.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e1024.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x30]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 30 <unknown>
+vlseg2e64ff.v v8, (a0)
+# CHECK-INST: vlseg2e64ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x23]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 23 <unknown>
-vlseg3e8.v v8, (a0)
-# CHECK-INST: vlseg3e8.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x42]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 42 <unknown>
+vlseg2e128ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e128ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x31]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 31 <unknown>
-vlseg3e16.v v8, (a0)
-# CHECK-INST: vlseg3e16.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x42]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 42 <unknown>
+vlseg2e128ff.v v8, (a0)
+# CHECK-INST: vlseg2e128ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x33]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 33 <unknown>
-vlseg3e32.v v8, (a0)
-# CHECK-INST: vlseg3e32.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x42]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 42 <unknown>
+vlseg2e256ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e256ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x31]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 31 <unknown>
-vlseg3e64.v v8, (a0)
-# CHECK-INST: vlseg3e64.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x42]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 42 <unknown>
+vlseg2e256ff.v v8, (a0)
+# CHECK-INST: vlseg2e256ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x33]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 33 <unknown>
-vlseg3e128.v v8, (a0)
-# CHECK-INST: vlseg3e128.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x52]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 52 <unknown>
+vlseg2e512ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e512ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x31]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 31 <unknown>
-vlseg3e256.v v8, (a0)
-# CHECK-INST: vlseg3e256.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x52]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 52 <unknown>
+vlseg2e512ff.v v8, (a0)
+# CHECK-INST: vlseg2e512ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x33]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 33 <unknown>
-vlseg3e512.v v8, (a0)
-# CHECK-INST: vlseg3e512.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x52]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 52 <unknown>
+vlseg2e1024ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg2e1024ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x31]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 31 <unknown>
-vlseg3e1024.v v8, (a0)
-# CHECK-INST: vlseg3e1024.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x52]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 52 <unknown>
+vlseg2e1024ff.v v8, (a0)
+# CHECK-INST: vlseg2e1024ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x33]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 33 <unknown>
-vlseg3e8.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e8.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x40]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 40 <unknown>
+vlsseg2e8.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg2e8.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x28]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 28 <unknown>
-vlseg3e16.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e16.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x40]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 40 <unknown>
+vlsseg2e8.v v8, (a0), a1
+# CHECK-INST: vlsseg2e8.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x2a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 2a <unknown>
-vlseg3e32.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e32.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x40]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 40 <unknown>
+vlsseg2e16.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg2e16.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x28]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 28 <unknown>
-vlseg3e64.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e64.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x40]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 40 <unknown>
+vlsseg2e16.v v8, (a0), a1
+# CHECK-INST: vlsseg2e16.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x2a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 2a <unknown>
-vlseg3e128.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e128.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x50]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 50 <unknown>
+vlsseg2e32.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg2e32.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x28]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 28 <unknown>
-vlseg3e256.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e256.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x50]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 50 <unknown>
+vlsseg2e32.v v8, (a0), a1
+# CHECK-INST: vlsseg2e32.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x2a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 2a <unknown>
-vlseg3e512.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e512.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x50]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 50 <unknown>
+vlsseg2e64.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg2e64.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x28]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 28 <unknown>
-vlseg3e1024.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e1024.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x50]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 50 <unknown>
+vlsseg2e64.v v8, (a0), a1
+# CHECK-INST: vlsseg2e64.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x2a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 2a <unknown>
-vlseg4e8.v v8, (a0)
-# CHECK-INST: vlseg4e8.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x62]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 62 <unknown>
+vlsseg2e128.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg2e128.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x38]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 38 <unknown>
-vlseg4e16.v v8, (a0)
-# CHECK-INST: vlseg4e16.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x62]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 62 <unknown>
+vlsseg2e128.v v8, (a0), a1
+# CHECK-INST: vlsseg2e128.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x3a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 3a <unknown>
-vlseg4e32.v v8, (a0)
-# CHECK-INST: vlseg4e32.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x62]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 62 <unknown>
+vlsseg2e256.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg2e256.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x38]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 38 <unknown>
-vlseg4e64.v v8, (a0)
-# CHECK-INST: vlseg4e64.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x62]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 62 <unknown>
+vlsseg2e256.v v8, (a0), a1
+# CHECK-INST: vlsseg2e256.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x3a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 3a <unknown>
-vlseg4e128.v v8, (a0)
-# CHECK-INST: vlseg4e128.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x72]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 72 <unknown>
+vlsseg2e512.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg2e512.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x38]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 38 <unknown>
-vlseg4e256.v v8, (a0)
-# CHECK-INST: vlseg4e256.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x72]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 72 <unknown>
+vlsseg2e512.v v8, (a0), a1
+# CHECK-INST: vlsseg2e512.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x3a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 3a <unknown>
-vlseg4e512.v v8, (a0)
-# CHECK-INST: vlseg4e512.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x72]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 72 <unknown>
+vlsseg2e1024.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg2e1024.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x38]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 38 <unknown>
-vlseg4e1024.v v8, (a0)
-# CHECK-INST: vlseg4e1024.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x72]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 72 <unknown>
+vlsseg2e1024.v v8, (a0), a1
+# CHECK-INST: vlsseg2e1024.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x3a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 3a <unknown>
-vlseg4e8.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e8.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x60]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 60 <unknown>
+vluxseg2ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg2ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0x24]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 24 <unknown>
+
+vluxseg2ei8.v v8, (a0), v4
+# CHECK-INST: vluxseg2ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0x26]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 26 <unknown>
+
+vluxseg2ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg2ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0x24]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 24 <unknown>
+
+vluxseg2ei16.v v8, (a0), v4
+# CHECK-INST: vluxseg2ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0x26]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 26 <unknown>
+
+vluxseg2ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg2ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0x24]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 24 <unknown>
+
+vluxseg2ei32.v v8, (a0), v4
+# CHECK-INST: vluxseg2ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0x26]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 26 <unknown>
+
+vluxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0x24]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 24 <unknown>
+
+vluxseg2ei64.v v8, (a0), v4
+# CHECK-INST: vluxseg2ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0x26]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 26 <unknown>
+
+vloxseg2ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg2ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0x2c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 2c <unknown>
-vlseg4e16.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e16.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x60]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 60 <unknown>
+vloxseg2ei8.v v8, (a0), v4
+# CHECK-INST: vloxseg2ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0x2e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 2e <unknown>
-vlseg4e32.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e32.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x60]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 60 <unknown>
+vloxseg2ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg2ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0x2c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 2c <unknown>
-vlseg4e64.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e64.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x60]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 60 <unknown>
+vloxseg2ei16.v v8, (a0), v4
+# CHECK-INST: vloxseg2ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0x2e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 2e <unknown>
-vlseg4e128.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e128.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x70]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 70 <unknown>
+vloxseg2ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg2ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0x2c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 2c <unknown>
-vlseg4e256.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e256.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x70]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 70 <unknown>
+vloxseg2ei32.v v8, (a0), v4
+# CHECK-INST: vloxseg2ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0x2e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 2e <unknown>
-vlseg4e512.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e512.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x70]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 70 <unknown>
+vloxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0x2c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 2c <unknown>
-vlseg4e1024.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e1024.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x70]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 70 <unknown>
+vloxseg2ei64.v v8, (a0), v4
+# CHECK-INST: vloxseg2ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0x2e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 2e <unknown>
-vlseg5e8.v v8, (a0)
-# CHECK-INST: vlseg5e8.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x82]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 82 <unknown>
+vlseg3e8.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e8.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x40]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 40 <unknown>
-vlseg5e16.v v8, (a0)
-# CHECK-INST: vlseg5e16.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x82]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 82 <unknown>
+vlseg3e8.v v8, (a0)
+# CHECK-INST: vlseg3e8.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x42]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 42 <unknown>
-vlseg5e32.v v8, (a0)
-# CHECK-INST: vlseg5e32.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x82]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 82 <unknown>
+vlseg3e16.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e16.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x40]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 40 <unknown>
-vlseg5e64.v v8, (a0)
-# CHECK-INST: vlseg5e64.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x82]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 82 <unknown>
+vlseg3e16.v v8, (a0)
+# CHECK-INST: vlseg3e16.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x42]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 42 <unknown>
-vlseg5e128.v v8, (a0)
-# CHECK-INST: vlseg5e128.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x92]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 92 <unknown>
+vlseg3e32.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e32.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x40]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 40 <unknown>
-vlseg5e256.v v8, (a0)
-# CHECK-INST: vlseg5e256.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x92]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 92 <unknown>
+vlseg3e32.v v8, (a0)
+# CHECK-INST: vlseg3e32.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x42]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 42 <unknown>
-vlseg5e512.v v8, (a0)
-# CHECK-INST: vlseg5e512.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x92]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 92 <unknown>
+vlseg3e64.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e64.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x40]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 40 <unknown>
-vlseg5e1024.v v8, (a0)
-# CHECK-INST: vlseg5e1024.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x92]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 92 <unknown>
+vlseg3e64.v v8, (a0)
+# CHECK-INST: vlseg3e64.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x42]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 42 <unknown>
-vlseg5e8.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e8.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x80]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 80 <unknown>
+vlseg3e128.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e128.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x50]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 50 <unknown>
-vlseg5e16.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e16.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x80]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 80 <unknown>
+vlseg3e128.v v8, (a0)
+# CHECK-INST: vlseg3e128.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x52]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 52 <unknown>
-vlseg5e32.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e32.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x80]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 80 <unknown>
+vlseg3e256.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e256.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x50]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 50 <unknown>
-vlseg5e64.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e64.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x80]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 80 <unknown>
+vlseg3e256.v v8, (a0)
+# CHECK-INST: vlseg3e256.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x52]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 52 <unknown>
-vlseg5e128.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e128.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x90]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 90 <unknown>
+vlseg3e512.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e512.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x50]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 50 <unknown>
-vlseg5e256.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e256.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x90]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 90 <unknown>
+vlseg3e512.v v8, (a0)
+# CHECK-INST: vlseg3e512.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x52]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 52 <unknown>
-vlseg5e512.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e512.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x90]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 90 <unknown>
+vlseg3e1024.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e1024.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x50]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 50 <unknown>
-vlseg5e1024.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e1024.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x90]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 90 <unknown>
+vlseg3e1024.v v8, (a0)
+# CHECK-INST: vlseg3e1024.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x52]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 52 <unknown>
-vlseg6e8.v v8, (a0)
-# CHECK-INST: vlseg6e8.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0xa2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 a2 <unknown>
+vlseg3e8ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e8ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x41]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 41 <unknown>
-vlseg6e16.v v8, (a0)
-# CHECK-INST: vlseg6e16.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0xa2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 a2 <unknown>
+vlseg3e8ff.v v8, (a0)
+# CHECK-INST: vlseg3e8ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x43]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 43 <unknown>
-vlseg6e32.v v8, (a0)
-# CHECK-INST: vlseg6e32.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0xa2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 a2 <unknown>
+vlseg3e16ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e16ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x41]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 41 <unknown>
-vlseg6e64.v v8, (a0)
-# CHECK-INST: vlseg6e64.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0xa2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 a2 <unknown>
+vlseg3e16ff.v v8, (a0)
+# CHECK-INST: vlseg3e16ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x43]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 43 <unknown>
-vlseg6e128.v v8, (a0)
-# CHECK-INST: vlseg6e128.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0xb2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 b2 <unknown>
+vlseg3e32ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e32ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x41]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 41 <unknown>
-vlseg6e256.v v8, (a0)
-# CHECK-INST: vlseg6e256.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0xb2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 b2 <unknown>
+vlseg3e32ff.v v8, (a0)
+# CHECK-INST: vlseg3e32ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x43]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 43 <unknown>
-vlseg6e512.v v8, (a0)
-# CHECK-INST: vlseg6e512.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0xb2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 b2 <unknown>
+vlseg3e64ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e64ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x41]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 41 <unknown>
-vlseg6e1024.v v8, (a0)
-# CHECK-INST: vlseg6e1024.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0xb2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 b2 <unknown>
+vlseg3e64ff.v v8, (a0)
+# CHECK-INST: vlseg3e64ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x43]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 43 <unknown>
-vlseg6e8.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e8.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0xa0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 a0 <unknown>
+vlseg3e128ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e128ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x51]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 51 <unknown>
-vlseg6e16.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e16.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0xa0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 a0 <unknown>
+vlseg3e128ff.v v8, (a0)
+# CHECK-INST: vlseg3e128ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x53]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 53 <unknown>
-vlseg6e32.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e32.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0xa0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 a0 <unknown>
+vlseg3e256ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e256ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x51]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 51 <unknown>
-vlseg6e64.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e64.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0xa0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 a0 <unknown>
+vlseg3e256ff.v v8, (a0)
+# CHECK-INST: vlseg3e256ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x53]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 53 <unknown>
-vlseg6e128.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e128.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0xb0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 b0 <unknown>
+vlseg3e512ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e512ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x51]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 51 <unknown>
-vlseg6e256.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e256.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0xb0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 b0 <unknown>
+vlseg3e512ff.v v8, (a0)
+# CHECK-INST: vlseg3e512ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x53]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 53 <unknown>
-vlseg6e512.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e512.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0xb0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 b0 <unknown>
+vlseg3e1024ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg3e1024ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x51]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 51 <unknown>
-vlseg6e1024.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e1024.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0xb0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 b0 <unknown>
+vlseg3e1024ff.v v8, (a0)
+# CHECK-INST: vlseg3e1024ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x53]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 53 <unknown>
-vlseg7e8.v v8, (a0)
-# CHECK-INST: vlseg7e8.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0xc2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 c2 <unknown>
+vlsseg3e8.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg3e8.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x48]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 48 <unknown>
-vlseg7e16.v v8, (a0)
-# CHECK-INST: vlseg7e16.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0xc2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 c2 <unknown>
+vlsseg3e8.v v8, (a0), a1
+# CHECK-INST: vlsseg3e8.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x4a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 4a <unknown>
-vlseg7e32.v v8, (a0)
-# CHECK-INST: vlseg7e32.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0xc2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 c2 <unknown>
+vlsseg3e16.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg3e16.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x48]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 48 <unknown>
-vlseg7e64.v v8, (a0)
-# CHECK-INST: vlseg7e64.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0xc2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 c2 <unknown>
+vlsseg3e16.v v8, (a0), a1
+# CHECK-INST: vlsseg3e16.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x4a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 4a <unknown>
-vlseg7e128.v v8, (a0)
-# CHECK-INST: vlseg7e128.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0xd2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 d2 <unknown>
+vlsseg3e32.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg3e32.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x48]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 48 <unknown>
-vlseg7e256.v v8, (a0)
-# CHECK-INST: vlseg7e256.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0xd2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 d2 <unknown>
+vlsseg3e32.v v8, (a0), a1
+# CHECK-INST: vlsseg3e32.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x4a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 4a <unknown>
-vlseg7e512.v v8, (a0)
-# CHECK-INST: vlseg7e512.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0xd2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 d2 <unknown>
+vlsseg3e64.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg3e64.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x48]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 48 <unknown>
-vlseg7e1024.v v8, (a0)
-# CHECK-INST: vlseg7e1024.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0xd2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 d2 <unknown>
+vlsseg3e64.v v8, (a0), a1
+# CHECK-INST: vlsseg3e64.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x4a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 4a <unknown>
-vlseg7e8.v v8, (a0), v0.t
-# CHECK-INST: vlseg7e8.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0xc0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 c0 <unknown>
+vlsseg3e128.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg3e128.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 58 <unknown>
-vlseg7e16.v v8, (a0), v0.t
-# CHECK-INST: vlseg7e16.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0xc0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 c0 <unknown>
+vlsseg3e128.v v8, (a0), a1
+# CHECK-INST: vlsseg3e128.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x5a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 5a <unknown>
-vlseg7e32.v v8, (a0), v0.t
-# CHECK-INST: vlseg7e32.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0xc0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 c0 <unknown>
+vlsseg3e256.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg3e256.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 58 <unknown>
-vlseg7e64.v v8, (a0), v0.t
-# CHECK-INST: vlseg7e64.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0xc0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 c0 <unknown>
+vlsseg3e256.v v8, (a0), a1
+# CHECK-INST: vlsseg3e256.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x5a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 5a <unknown>
-vlseg7e128.v v8, (a0), v0.t
-# CHECK-INST: vlseg7e128.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0xd0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 d0 <unknown>
+vlsseg3e512.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg3e512.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 58 <unknown>
-vlseg7e256.v v8, (a0), v0.t
-# CHECK-INST: vlseg7e256.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0xd0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 d0 <unknown>
+vlsseg3e512.v v8, (a0), a1
+# CHECK-INST: vlsseg3e512.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x5a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 5a <unknown>
-vlseg7e512.v v8, (a0), v0.t
-# CHECK-INST: vlseg7e512.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0xd0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 d0 <unknown>
+vlsseg3e1024.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg3e1024.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 58 <unknown>
-vlseg7e1024.v v8, (a0), v0.t
-# CHECK-INST: vlseg7e1024.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0xd0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 d0 <unknown>
+vlsseg3e1024.v v8, (a0), a1
+# CHECK-INST: vlsseg3e1024.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x5a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 5a <unknown>
-vlseg8e8.v v8, (a0)
-# CHECK-INST: vlseg8e8.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0xe2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 e2 <unknown>
+vluxseg3ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg3ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 44 <unknown>
+
+vluxseg3ei8.v v8, (a0), v4
+# CHECK-INST: vluxseg3ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 46 <unknown>
+
+vluxseg3ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg3ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 44 <unknown>
+
+vluxseg3ei16.v v8, (a0), v4
+# CHECK-INST: vluxseg3ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 46 <unknown>
+
+vluxseg3ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg3ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 44 <unknown>
+
+vluxseg3ei32.v v8, (a0), v4
+# CHECK-INST: vluxseg3ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 46 <unknown>
+
+vluxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 44 <unknown>
+
+vluxseg3ei64.v v8, (a0), v4
+# CHECK-INST: vluxseg3ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 46 <unknown>
+
+vloxseg3ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg3ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 4c <unknown>
-vlseg8e16.v v8, (a0)
-# CHECK-INST: vlseg8e16.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0xe2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 e2 <unknown>
+vloxseg3ei8.v v8, (a0), v4
+# CHECK-INST: vloxseg3ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 4e <unknown>
-vlseg8e32.v v8, (a0)
-# CHECK-INST: vlseg8e32.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0xe2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 e2 <unknown>
+vloxseg3ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg3ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 4c <unknown>
-vlseg8e64.v v8, (a0)
-# CHECK-INST: vlseg8e64.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0xe2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 e2 <unknown>
+vloxseg3ei16.v v8, (a0), v4
+# CHECK-INST: vloxseg3ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 4e <unknown>
-vlseg8e128.v v8, (a0)
-# CHECK-INST: vlseg8e128.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0xf2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 f2 <unknown>
+vloxseg3ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg3ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 4c <unknown>
-vlseg8e256.v v8, (a0)
-# CHECK-INST: vlseg8e256.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0xf2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 f2 <unknown>
+vloxseg3ei32.v v8, (a0), v4
+# CHECK-INST: vloxseg3ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 4e <unknown>
-vlseg8e512.v v8, (a0)
-# CHECK-INST: vlseg8e512.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0xf2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 f2 <unknown>
+vloxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 4c <unknown>
-vlseg8e1024.v v8, (a0)
-# CHECK-INST: vlseg8e1024.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0xf2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 f2 <unknown>
+vloxseg3ei64.v v8, (a0), v4
+# CHECK-INST: vloxseg3ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 4e <unknown>
-vlseg8e8.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e8.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0xe0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 e0 <unknown>
+vlseg4e8.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e8.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x60]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 60 <unknown>
-vlseg8e16.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e16.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0xe0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 e0 <unknown>
+vlseg4e8.v v8, (a0)
+# CHECK-INST: vlseg4e8.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x62]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 62 <unknown>
-vlseg8e32.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e32.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0xe0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 e0 <unknown>
+vlseg4e16.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e16.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x60]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 60 <unknown>
-vlseg8e64.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e64.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0xe0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 e0 <unknown>
+vlseg4e16.v v8, (a0)
+# CHECK-INST: vlseg4e16.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x62]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 62 <unknown>
-vlseg8e128.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e128.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0xf0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 f0 <unknown>
+vlseg4e32.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e32.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x60]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 60 <unknown>
-vlseg8e256.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e256.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0xf0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 f0 <unknown>
+vlseg4e32.v v8, (a0)
+# CHECK-INST: vlseg4e32.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x62]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 62 <unknown>
-vlseg8e512.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e512.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0xf0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 f0 <unknown>
+vlseg4e64.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e64.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x60]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 60 <unknown>
-vlseg8e1024.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e1024.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0xf0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 f0 <unknown>
+vlseg4e64.v v8, (a0)
+# CHECK-INST: vlseg4e64.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x62]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 62 <unknown>
-vlsseg2e8.v v8, (a0), a1
-# CHECK-INST: vlsseg2e8.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x2a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 2a <unknown>
+vlseg4e128.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e128.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x70]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 70 <unknown>
-vlsseg2e16.v v8, (a0), a1
-# CHECK-INST: vlsseg2e16.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x2a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 2a <unknown>
+vlseg4e128.v v8, (a0)
+# CHECK-INST: vlseg4e128.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x72]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 72 <unknown>
-vlsseg2e32.v v8, (a0), a1
-# CHECK-INST: vlsseg2e32.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x2a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 2a <unknown>
+vlseg4e256.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e256.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x70]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 70 <unknown>
-vlsseg2e64.v v8, (a0), a1
-# CHECK-INST: vlsseg2e64.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x2a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 2a <unknown>
+vlseg4e256.v v8, (a0)
+# CHECK-INST: vlseg4e256.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x72]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 72 <unknown>
-vlsseg2e128.v v8, (a0), a1
-# CHECK-INST: vlsseg2e128.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x3a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 3a <unknown>
+vlseg4e512.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e512.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x70]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 70 <unknown>
-vlsseg2e256.v v8, (a0), a1
-# CHECK-INST: vlsseg2e256.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x3a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 3a <unknown>
+vlseg4e512.v v8, (a0)
+# CHECK-INST: vlseg4e512.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x72]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 72 <unknown>
-vlsseg2e512.v v8, (a0), a1
-# CHECK-INST: vlsseg2e512.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x3a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 3a <unknown>
+vlseg4e1024.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e1024.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x70]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 70 <unknown>
-vlsseg2e1024.v v8, (a0), a1
-# CHECK-INST: vlsseg2e1024.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x3a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 3a <unknown>
+vlseg4e1024.v v8, (a0)
+# CHECK-INST: vlseg4e1024.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x72]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 72 <unknown>
-vlsseg2e8.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg2e8.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x28]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 28 <unknown>
+vlseg4e8ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e8ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x61]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 61 <unknown>
-vlsseg2e16.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg2e16.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x28]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 28 <unknown>
+vlseg4e8ff.v v8, (a0)
+# CHECK-INST: vlseg4e8ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x63]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 63 <unknown>
-vlsseg2e32.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg2e32.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x28]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 28 <unknown>
+vlseg4e16ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e16ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x61]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 61 <unknown>
-vlsseg2e64.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg2e64.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x28]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 28 <unknown>
+vlseg4e16ff.v v8, (a0)
+# CHECK-INST: vlseg4e16ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x63]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 63 <unknown>
-vlsseg2e128.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg2e128.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x38]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 38 <unknown>
+vlseg4e32ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e32ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x61]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 61 <unknown>
-vlsseg2e256.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg2e256.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x38]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 38 <unknown>
+vlseg4e32ff.v v8, (a0)
+# CHECK-INST: vlseg4e32ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x63]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 63 <unknown>
-vlsseg2e512.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg2e512.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x38]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 38 <unknown>
+vlseg4e64ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e64ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x61]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 61 <unknown>
-vlsseg2e1024.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg2e1024.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x38]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 38 <unknown>
+vlseg4e64ff.v v8, (a0)
+# CHECK-INST: vlseg4e64ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x63]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 63 <unknown>
-vlsseg3e8.v v8, (a0), a1
-# CHECK-INST: vlsseg3e8.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x4a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 4a <unknown>
+vlseg4e128ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e128ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x71]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 71 <unknown>
-vlsseg3e16.v v8, (a0), a1
-# CHECK-INST: vlsseg3e16.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x4a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 4a <unknown>
+vlseg4e128ff.v v8, (a0)
+# CHECK-INST: vlseg4e128ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x73]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 73 <unknown>
-vlsseg3e32.v v8, (a0), a1
-# CHECK-INST: vlsseg3e32.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x4a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 4a <unknown>
+vlseg4e256ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e256ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x71]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 71 <unknown>
-vlsseg3e64.v v8, (a0), a1
-# CHECK-INST: vlsseg3e64.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x4a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 4a <unknown>
+vlseg4e256ff.v v8, (a0)
+# CHECK-INST: vlseg4e256ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x73]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 73 <unknown>
-vlsseg3e128.v v8, (a0), a1
-# CHECK-INST: vlsseg3e128.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x5a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 5a <unknown>
+vlseg4e512ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e512ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x71]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 71 <unknown>
-vlsseg3e256.v v8, (a0), a1
-# CHECK-INST: vlsseg3e256.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x5a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 5a <unknown>
+vlseg4e512ff.v v8, (a0)
+# CHECK-INST: vlseg4e512ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x73]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 73 <unknown>
-vlsseg3e512.v v8, (a0), a1
-# CHECK-INST: vlsseg3e512.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x5a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 5a <unknown>
+vlseg4e1024ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg4e1024ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x71]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 71 <unknown>
-vlsseg3e1024.v v8, (a0), a1
-# CHECK-INST: vlsseg3e1024.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x5a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 5a <unknown>
-
-vlsseg3e8.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg3e8.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x48]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 48 <unknown>
-
-vlsseg3e16.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg3e16.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x48]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 48 <unknown>
-
-vlsseg3e32.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg3e32.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x48]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 48 <unknown>
-
-vlsseg3e64.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg3e64.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x48]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 48 <unknown>
-
-vlsseg3e128.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg3e128.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x58]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 58 <unknown>
-
-vlsseg3e256.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg3e256.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x58]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 58 <unknown>
-
-vlsseg3e512.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg3e512.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x58]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 58 <unknown>
+vlseg4e1024ff.v v8, (a0)
+# CHECK-INST: vlseg4e1024ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x73]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 73 <unknown>
-vlsseg3e1024.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg3e1024.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x58]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 58 <unknown>
+vlsseg4e8.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg4e8.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x68]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 68 <unknown>
vlsseg4e8.v v8, (a0), a1
# CHECK-INST: vlsseg4e8.v v8, (a0), a1
# CHECK-ENCODING: [0x07,0x04,0xb5,0x6a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 04 b5 6a <unknown>
+vlsseg4e16.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg4e16.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x68]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 68 <unknown>
+
vlsseg4e16.v v8, (a0), a1
# CHECK-INST: vlsseg4e16.v v8, (a0), a1
# CHECK-ENCODING: [0x07,0x54,0xb5,0x6a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 54 b5 6a <unknown>
+vlsseg4e32.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg4e32.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x68]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 68 <unknown>
+
vlsseg4e32.v v8, (a0), a1
# CHECK-INST: vlsseg4e32.v v8, (a0), a1
# CHECK-ENCODING: [0x07,0x64,0xb5,0x6a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 64 b5 6a <unknown>
+vlsseg4e64.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg4e64.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x68]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 68 <unknown>
+
vlsseg4e64.v v8, (a0), a1
# CHECK-INST: vlsseg4e64.v v8, (a0), a1
# CHECK-ENCODING: [0x07,0x74,0xb5,0x6a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 74 b5 6a <unknown>
+vlsseg4e128.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg4e128.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x78]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 78 <unknown>
+
vlsseg4e128.v v8, (a0), a1
# CHECK-INST: vlsseg4e128.v v8, (a0), a1
# CHECK-ENCODING: [0x07,0x04,0xb5,0x7a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 04 b5 7a <unknown>
+vlsseg4e256.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg4e256.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x78]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 78 <unknown>
+
vlsseg4e256.v v8, (a0), a1
# CHECK-INST: vlsseg4e256.v v8, (a0), a1
# CHECK-ENCODING: [0x07,0x54,0xb5,0x7a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 54 b5 7a <unknown>

+vlsseg4e512.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg4e512.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x78]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 78 <unknown>
+
vlsseg4e512.v v8, (a0), a1
# CHECK-INST: vlsseg4e512.v v8, (a0), a1
# CHECK-ENCODING: [0x07,0x64,0xb5,0x7a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 64 b5 7a <unknown>

+vlsseg4e1024.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg4e1024.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x78]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 78 <unknown>
+
vlsseg4e1024.v v8, (a0), a1
# CHECK-INST: vlsseg4e1024.v v8, (a0), a1
# CHECK-ENCODING: [0x07,0x74,0xb5,0x7a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 74 b5 7a <unknown>

-vlsseg4e8.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg4e8.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x68]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 68 <unknown>
+vluxseg4ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg4ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0x64]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 64 <unknown>
+
+vluxseg4ei8.v v8, (a0), v4
+# CHECK-INST: vluxseg4ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0x66]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 66 <unknown>
+
+vluxseg4ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg4ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0x64]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 64 <unknown>
+
+vluxseg4ei16.v v8, (a0), v4
+# CHECK-INST: vluxseg4ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0x66]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 66 <unknown>
+
+vluxseg4ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg4ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0x64]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 64 <unknown>
+
+vluxseg4ei32.v v8, (a0), v4
+# CHECK-INST: vluxseg4ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0x66]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 66 <unknown>
+
+vluxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0x64]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 64 <unknown>
+
+vluxseg4ei64.v v8, (a0), v4
+# CHECK-INST: vluxseg4ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0x66]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 66 <unknown>
+
+vloxseg4ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg4ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0x6c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 6c <unknown>

-vlsseg4e16.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg4e16.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x68]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 68 <unknown>
+vloxseg4ei8.v v8, (a0), v4
+# CHECK-INST: vloxseg4ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0x6e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 6e <unknown>

-vlsseg4e32.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg4e32.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x68]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 68 <unknown>
+vloxseg4ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg4ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0x6c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 6c <unknown>

-vlsseg4e64.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg4e64.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x68]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 68 <unknown>
+vloxseg4ei16.v v8, (a0), v4
+# CHECK-INST: vloxseg4ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0x6e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 6e <unknown>

-vlsseg4e128.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg4e128.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x78]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 78 <unknown>
+vloxseg4ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg4ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0x6c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 6c <unknown>

-vlsseg4e256.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg4e256.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x78]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 78 <unknown>
+vloxseg4ei32.v v8, (a0), v4
+# CHECK-INST: vloxseg4ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0x6e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 6e <unknown>

-vlsseg4e512.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg4e512.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x78]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 78 <unknown>
+vloxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0x6c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 6c <unknown>

-vlsseg4e1024.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg4e1024.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x78]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 78 <unknown>
+vloxseg4ei64.v v8, (a0), v4
+# CHECK-INST: vloxseg4ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0x6e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 6e <unknown>

-vlsseg5e8.v v8, (a0), a1
-# CHECK-INST: vlsseg5e8.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x8a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 8a <unknown>
+vlseg5e8.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e8.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x80]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 80 <unknown>

-vlsseg5e16.v v8, (a0), a1
-# CHECK-INST: vlsseg5e16.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x8a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 8a <unknown>
+vlseg5e8.v v8, (a0)
+# CHECK-INST: vlseg5e8.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x82]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 82 <unknown>

-vlsseg5e32.v v8, (a0), a1
-# CHECK-INST: vlsseg5e32.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x8a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 8a <unknown>
+vlseg5e16.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e16.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x80]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 80 <unknown>

-vlsseg5e64.v v8, (a0), a1
-# CHECK-INST: vlsseg5e64.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x8a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 8a <unknown>
+vlseg5e16.v v8, (a0)
+# CHECK-INST: vlseg5e16.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x82]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 82 <unknown>

-vlsseg5e128.v v8, (a0), a1
-# CHECK-INST: vlsseg5e128.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x9a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 9a <unknown>
+vlseg5e32.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e32.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x80]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 80 <unknown>

-vlsseg5e256.v v8, (a0), a1
-# CHECK-INST: vlsseg5e256.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x9a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 9a <unknown>
+vlseg5e32.v v8, (a0)
+# CHECK-INST: vlseg5e32.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x82]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 82 <unknown>

-vlsseg5e512.v v8, (a0), a1
-# CHECK-INST: vlsseg5e512.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x9a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 9a <unknown>
+vlseg5e64.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e64.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x80]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 80 <unknown>

-vlsseg5e1024.v v8, (a0), a1
-# CHECK-INST: vlsseg5e1024.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x9a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 9a <unknown>
+vlseg5e64.v v8, (a0)
+# CHECK-INST: vlseg5e64.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x82]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 82 <unknown>

-vlsseg5e8.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg5e8.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x88]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 88 <unknown>
+vlseg5e128.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e128.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x90]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 90 <unknown>

-vlsseg5e16.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg5e16.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x88]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 88 <unknown>
+vlseg5e128.v v8, (a0)
+# CHECK-INST: vlseg5e128.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0x92]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 92 <unknown>

-vlsseg5e32.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg5e32.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x88]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 88 <unknown>
+vlseg5e256.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e256.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x90]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 90 <unknown>

-vlsseg5e64.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg5e64.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x88]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 88 <unknown>
+vlseg5e256.v v8, (a0)
+# CHECK-INST: vlseg5e256.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0x92]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 92 <unknown>

-vlsseg5e128.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg5e128.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0x98]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 98 <unknown>
+vlseg5e512.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e512.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x90]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 90 <unknown>

-vlsseg5e256.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg5e256.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0x98]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 98 <unknown>
+vlseg5e512.v v8, (a0)
+# CHECK-INST: vlseg5e512.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0x92]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 92 <unknown>

-vlsseg5e512.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg5e512.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0x98]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 98 <unknown>
+vlseg5e1024.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e1024.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x90]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 90 <unknown>

-vlsseg5e1024.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg5e1024.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0x98]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 98 <unknown>
-
-vlsseg6e8.v v8, (a0), a1
-# CHECK-INST: vlsseg6e8.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xaa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 aa <unknown>
-
-vlsseg6e16.v v8, (a0), a1
-# CHECK-INST: vlsseg6e16.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xaa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 aa <unknown>
-
-vlsseg6e32.v v8, (a0), a1
-# CHECK-INST: vlsseg6e32.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xaa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 aa <unknown>
-
-vlsseg6e64.v v8, (a0), a1
-# CHECK-INST: vlsseg6e64.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xaa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 aa <unknown>
-
-vlsseg6e128.v v8, (a0), a1
-# CHECK-INST: vlsseg6e128.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xba]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 ba <unknown>
-
-vlsseg6e256.v v8, (a0), a1
-# CHECK-INST: vlsseg6e256.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xba]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 ba <unknown>
-
-vlsseg6e512.v v8, (a0), a1
-# CHECK-INST: vlsseg6e512.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xba]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 ba <unknown>
-
-vlsseg6e1024.v v8, (a0), a1
-# CHECK-INST: vlsseg6e1024.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xba]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 ba <unknown>
-
-vlsseg6e8.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg6e8.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xa8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 a8 <unknown>
-
-vlsseg6e16.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg6e16.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xa8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 a8 <unknown>
-
-vlsseg6e32.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg6e32.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xa8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 a8 <unknown>
-
-vlsseg6e64.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg6e64.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xa8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 a8 <unknown>
-
-vlsseg6e128.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg6e128.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xb8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 b8 <unknown>
-
-vlsseg6e256.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg6e256.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xb8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 b8 <unknown>
-
-vlsseg6e512.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg6e512.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xb8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 b8 <unknown>
-
-vlsseg6e1024.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg6e1024.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xb8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 b8 <unknown>
-
-vlsseg7e8.v v8, (a0), a1
-# CHECK-INST: vlsseg7e8.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xca]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 ca <unknown>
-
-vlsseg7e16.v v8, (a0), a1
-# CHECK-INST: vlsseg7e16.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xca]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 ca <unknown>
-
-vlsseg7e32.v v8, (a0), a1
-# CHECK-INST: vlsseg7e32.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xca]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 ca <unknown>
-
-vlsseg7e64.v v8, (a0), a1
-# CHECK-INST: vlsseg7e64.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xca]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 ca <unknown>
-
-vlsseg7e128.v v8, (a0), a1
-# CHECK-INST: vlsseg7e128.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xda]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 da <unknown>
-
-vlsseg7e256.v v8, (a0), a1
-# CHECK-INST: vlsseg7e256.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xda]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 da <unknown>
-
-vlsseg7e512.v v8, (a0), a1
-# CHECK-INST: vlsseg7e512.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xda]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 da <unknown>
-
-vlsseg7e1024.v v8, (a0), a1
-# CHECK-INST: vlsseg7e1024.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xda]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 da <unknown>
-
-vlsseg7e8.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg7e8.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xc8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 c8 <unknown>
-
-vlsseg7e16.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg7e16.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xc8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 c8 <unknown>
-
-vlsseg7e32.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg7e32.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xc8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 c8 <unknown>
-
-vlsseg7e64.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg7e64.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xc8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 c8 <unknown>
-
-vlsseg7e128.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg7e128.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xd8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 d8 <unknown>
-
-vlsseg7e256.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg7e256.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xd8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 d8 <unknown>
-
-vlsseg7e512.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg7e512.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xd8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 d8 <unknown>
-
-vlsseg7e1024.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg7e1024.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xd8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 d8 <unknown>
-
-vlsseg8e8.v v8, (a0), a1
-# CHECK-INST: vlsseg8e8.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xea]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 ea <unknown>
-
-vlsseg8e16.v v8, (a0), a1
-# CHECK-INST: vlsseg8e16.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xea]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 ea <unknown>
-
-vlsseg8e32.v v8, (a0), a1
-# CHECK-INST: vlsseg8e32.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xea]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 ea <unknown>
-
-vlsseg8e64.v v8, (a0), a1
-# CHECK-INST: vlsseg8e64.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xea]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 ea <unknown>
-
-vlsseg8e128.v v8, (a0), a1
-# CHECK-INST: vlsseg8e128.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xfa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 fa <unknown>
-
-vlsseg8e256.v v8, (a0), a1
-# CHECK-INST: vlsseg8e256.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xfa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 fa <unknown>
-
-vlsseg8e512.v v8, (a0), a1
-# CHECK-INST: vlsseg8e512.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xfa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 fa <unknown>
-
-vlsseg8e1024.v v8, (a0), a1
-# CHECK-INST: vlsseg8e1024.v v8, (a0), a1
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xfa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 fa <unknown>
-
-vlsseg8e8.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg8e8.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xe8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 e8 <unknown>
-
-vlsseg8e16.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg8e16.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xe8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 e8 <unknown>
-
-vlsseg8e32.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg8e32.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xe8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 e8 <unknown>
-
-vlsseg8e64.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg8e64.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xe8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 e8 <unknown>
-
-vlsseg8e128.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg8e128.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x04,0xb5,0xf8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 b5 f8 <unknown>
-
-vlsseg8e256.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg8e256.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x54,0xb5,0xf8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 b5 f8 <unknown>
-
-vlsseg8e512.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg8e512.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x64,0xb5,0xf8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 b5 f8 <unknown>
-
-vlsseg8e1024.v v8, (a0), a1, v0.t
-# CHECK-INST: vlsseg8e1024.v v8, (a0), a1, v0.t
-# CHECK-ENCODING: [0x07,0x74,0xb5,0xf8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 b5 f8 <unknown>
-
-vlxseg2ei8.v v8, (a0), v4
-# CHECK-INST: vlxseg2ei8.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0x2e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 2e <unknown>
-
-vlxseg2ei16.v v8, (a0), v4
-# CHECK-INST: vlxseg2ei16.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0x2e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 2e <unknown>
-
-vlxseg2ei32.v v8, (a0), v4
-# CHECK-INST: vlxseg2ei32.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0x2e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 2e <unknown>
-
-vlxseg2ei64.v v8, (a0), v4
-# CHECK-INST: vlxseg2ei64.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0x2e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 2e <unknown>
-
-vlxseg2ei128.v v8, (a0), v4
-# CHECK-INST: vlxseg2ei128.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0x3e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 3e <unknown>
-
-vlxseg2ei256.v v8, (a0), v4
-# CHECK-INST: vlxseg2ei256.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0x3e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 3e <unknown>
-
-vlxseg2ei512.v v8, (a0), v4
-# CHECK-INST: vlxseg2ei512.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0x3e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 3e <unknown>
-
-vlxseg2ei1024.v v8, (a0), v4
-# CHECK-INST: vlxseg2ei1024.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0x3e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 3e <unknown>
-
-vlxseg2ei8.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg2ei8.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0x2c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 2c <unknown>
-
-vlxseg2ei16.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg2ei16.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0x2c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 2c <unknown>
-
-vlxseg2ei32.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg2ei32.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0x2c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 2c <unknown>
-
-vlxseg2ei64.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg2ei64.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0x2c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 2c <unknown>
-
-vlxseg2ei128.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg2ei128.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0x3c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 3c <unknown>
-
-vlxseg2ei256.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg2ei256.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0x3c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 3c <unknown>
-
-vlxseg2ei512.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg2ei512.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0x3c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 3c <unknown>
-
-vlxseg2ei1024.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg2ei1024.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0x3c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 3c <unknown>
-
-vlxseg3ei8.v v8, (a0), v4
-# CHECK-INST: vlxseg3ei8.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0x4e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 4e <unknown>
-
-vlxseg3ei16.v v8, (a0), v4
-# CHECK-INST: vlxseg3ei16.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0x4e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 4e <unknown>
-
-vlxseg3ei32.v v8, (a0), v4
-# CHECK-INST: vlxseg3ei32.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0x4e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 4e <unknown>
-
-vlxseg3ei64.v v8, (a0), v4
-# CHECK-INST: vlxseg3ei64.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0x4e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 4e <unknown>
-
-vlxseg3ei128.v v8, (a0), v4
-# CHECK-INST: vlxseg3ei128.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0x5e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 5e <unknown>
-
-vlxseg3ei256.v v8, (a0), v4
-# CHECK-INST: vlxseg3ei256.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0x5e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 5e <unknown>
-
-vlxseg3ei512.v v8, (a0), v4
-# CHECK-INST: vlxseg3ei512.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0x5e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 5e <unknown>
-
-vlxseg3ei1024.v v8, (a0), v4
-# CHECK-INST: vlxseg3ei1024.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0x5e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 5e <unknown>
-
-vlxseg3ei8.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg3ei8.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0x4c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 4c <unknown>
-
-vlxseg3ei16.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg3ei16.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0x4c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 4c <unknown>
-
-vlxseg3ei32.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg3ei32.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0x4c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 4c <unknown>
-
-vlxseg3ei64.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg3ei64.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0x4c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 4c <unknown>
-
-vlxseg3ei128.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg3ei128.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0x5c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 5c <unknown>
-
-vlxseg3ei256.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg3ei256.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0x5c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 5c <unknown>
-
-vlxseg3ei512.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg3ei512.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0x5c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 5c <unknown>
-
-vlxseg3ei1024.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg3ei1024.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0x5c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 5c <unknown>
-
-vlxseg4ei8.v v8, (a0), v4
-# CHECK-INST: vlxseg4ei8.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0x6e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 6e <unknown>
-
-vlxseg4ei16.v v8, (a0), v4
-# CHECK-INST: vlxseg4ei16.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0x6e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 6e <unknown>
-
-vlxseg4ei32.v v8, (a0), v4
-# CHECK-INST: vlxseg4ei32.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0x6e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 6e <unknown>
-
-vlxseg4ei64.v v8, (a0), v4
-# CHECK-INST: vlxseg4ei64.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0x6e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 6e <unknown>
-
-vlxseg4ei128.v v8, (a0), v4
-# CHECK-INST: vlxseg4ei128.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0x7e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 7e <unknown>
-
-vlxseg4ei256.v v8, (a0), v4
-# CHECK-INST: vlxseg4ei256.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0x7e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 7e <unknown>
-
-vlxseg4ei512.v v8, (a0), v4
-# CHECK-INST: vlxseg4ei512.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0x7e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 7e <unknown>
-
-vlxseg4ei1024.v v8, (a0), v4
-# CHECK-INST: vlxseg4ei1024.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0x7e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 7e <unknown>
-
-vlxseg4ei8.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg4ei8.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0x6c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 6c <unknown>
-
-vlxseg4ei16.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg4ei16.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0x6c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 6c <unknown>
-
-vlxseg4ei32.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg4ei32.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0x6c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 6c <unknown>
-
-vlxseg4ei64.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg4ei64.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0x6c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 6c <unknown>
-
-vlxseg4ei128.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg4ei128.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0x7c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 7c <unknown>
-
-vlxseg4ei256.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg4ei256.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0x7c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 7c <unknown>
-
-vlxseg4ei512.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg4ei512.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0x7c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 7c <unknown>
-
-vlxseg4ei1024.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg4ei1024.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0x7c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 7c <unknown>
-
-vlxseg5ei8.v v8, (a0), v4
-# CHECK-INST: vlxseg5ei8.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0x8e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 8e <unknown>
-
-vlxseg5ei16.v v8, (a0), v4
-# CHECK-INST: vlxseg5ei16.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0x8e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 8e <unknown>
-
-vlxseg5ei32.v v8, (a0), v4
-# CHECK-INST: vlxseg5ei32.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0x8e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 8e <unknown>
-
-vlxseg5ei64.v v8, (a0), v4
-# CHECK-INST: vlxseg5ei64.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0x8e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 8e <unknown>
-
-vlxseg5ei128.v v8, (a0), v4
-# CHECK-INST: vlxseg5ei128.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0x9e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 9e <unknown>
-
-vlxseg5ei256.v v8, (a0), v4
-# CHECK-INST: vlxseg5ei256.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0x9e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 9e <unknown>
-
-vlxseg5ei512.v v8, (a0), v4
-# CHECK-INST: vlxseg5ei512.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0x9e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 9e <unknown>
-
-vlxseg5ei1024.v v8, (a0), v4
-# CHECK-INST: vlxseg5ei1024.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0x9e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 9e <unknown>
-
-vlxseg5ei8.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg5ei8.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0x8c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 8c <unknown>
-
-vlxseg5ei16.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg5ei16.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0x8c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 8c <unknown>
-
-vlxseg5ei32.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg5ei32.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0x8c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 8c <unknown>
-
-vlxseg5ei64.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg5ei64.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0x8c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 8c <unknown>
-
-vlxseg5ei128.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg5ei128.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0x9c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 9c <unknown>
-
-vlxseg5ei256.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg5ei256.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0x9c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 9c <unknown>
-
-vlxseg5ei512.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg5ei512.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0x9c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 9c <unknown>
-
-vlxseg5ei1024.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg5ei1024.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0x9c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 9c <unknown>
-
-vlxseg6ei8.v v8, (a0), v4
-# CHECK-INST: vlxseg6ei8.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0xae]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 ae <unknown>
-
-vlxseg6ei16.v v8, (a0), v4
-# CHECK-INST: vlxseg6ei16.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0xae]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 ae <unknown>
-
-vlxseg6ei32.v v8, (a0), v4
-# CHECK-INST: vlxseg6ei32.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0xae]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 ae <unknown>
-
-vlxseg6ei64.v v8, (a0), v4
-# CHECK-INST: vlxseg6ei64.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0xae]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 ae <unknown>
-
-vlxseg6ei128.v v8, (a0), v4
-# CHECK-INST: vlxseg6ei128.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0xbe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 be <unknown>
-
-vlxseg6ei256.v v8, (a0), v4
-# CHECK-INST: vlxseg6ei256.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0xbe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 be <unknown>
-
-vlxseg6ei512.v v8, (a0), v4
-# CHECK-INST: vlxseg6ei512.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0xbe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 be <unknown>
-
-vlxseg6ei1024.v v8, (a0), v4
-# CHECK-INST: vlxseg6ei1024.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0xbe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 be <unknown>
-
-vlxseg6ei8.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg6ei8.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0xac]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 ac <unknown>
-
-vlxseg6ei16.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg6ei16.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0xac]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 ac <unknown>
-
-vlxseg6ei32.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg6ei32.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0xac]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 ac <unknown>
-
-vlxseg6ei64.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg6ei64.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0xac]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 ac <unknown>
-
-vlxseg6ei128.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg6ei128.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0xbc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 bc <unknown>
-
-vlxseg6ei256.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg6ei256.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0xbc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 bc <unknown>
-
-vlxseg6ei512.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg6ei512.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0xbc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 bc <unknown>
-
-vlxseg6ei1024.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg6ei1024.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0xbc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 bc <unknown>
-
-vlxseg7ei8.v v8, (a0), v4
-# CHECK-INST: vlxseg7ei8.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0xce]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 ce <unknown>
-
-vlxseg7ei16.v v8, (a0), v4
-# CHECK-INST: vlxseg7ei16.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0xce]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 ce <unknown>
-
-vlxseg7ei32.v v8, (a0), v4
-# CHECK-INST: vlxseg7ei32.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0xce]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 ce <unknown>
-
-vlxseg7ei64.v v8, (a0), v4
-# CHECK-INST: vlxseg7ei64.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0xce]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 ce <unknown>
-
-vlxseg7ei128.v v8, (a0), v4
-# CHECK-INST: vlxseg7ei128.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0xde]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 de <unknown>
-
-vlxseg7ei256.v v8, (a0), v4
-# CHECK-INST: vlxseg7ei256.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0xde]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 de <unknown>
-
-vlxseg7ei512.v v8, (a0), v4
-# CHECK-INST: vlxseg7ei512.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0xde]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 de <unknown>
-
-vlxseg7ei1024.v v8, (a0), v4
-# CHECK-INST: vlxseg7ei1024.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0xde]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 de <unknown>
-
-vlxseg7ei8.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg7ei8.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0xcc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 cc <unknown>
-
-vlxseg7ei16.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg7ei16.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0xcc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 cc <unknown>
-
-vlxseg7ei32.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg7ei32.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0xcc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 cc <unknown>
-
-vlxseg7ei64.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg7ei64.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0xcc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 cc <unknown>
-
-vlxseg7ei128.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg7ei128.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0xdc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 dc <unknown>
-
-vlxseg7ei256.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg7ei256.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0xdc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 dc <unknown>
-
-vlxseg7ei512.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg7ei512.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0xdc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 dc <unknown>
-
-vlxseg7ei1024.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg7ei1024.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0xdc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 dc <unknown>
-
-vlxseg8ei8.v v8, (a0), v4
-# CHECK-INST: vlxseg8ei8.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0xee]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 ee <unknown>
-
-vlxseg8ei16.v v8, (a0), v4
-# CHECK-INST: vlxseg8ei16.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0xee]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 ee <unknown>
-
-vlxseg8ei32.v v8, (a0), v4
-# CHECK-INST: vlxseg8ei32.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0xee]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 ee <unknown>
-
-vlxseg8ei64.v v8, (a0), v4
-# CHECK-INST: vlxseg8ei64.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0xee]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 ee <unknown>
-
-vlxseg8ei128.v v8, (a0), v4
-# CHECK-INST: vlxseg8ei128.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x04,0x45,0xfe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 fe <unknown>
-
-vlxseg8ei256.v v8, (a0), v4
-# CHECK-INST: vlxseg8ei256.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x54,0x45,0xfe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 fe <unknown>
-
-vlxseg8ei512.v v8, (a0), v4
-# CHECK-INST: vlxseg8ei512.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x64,0x45,0xfe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 fe <unknown>
-
-vlxseg8ei1024.v v8, (a0), v4
-# CHECK-INST: vlxseg8ei1024.v v8, (a0), v4
-# CHECK-ENCODING: [0x07,0x74,0x45,0xfe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 fe <unknown>
-
-vlxseg8ei8.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg8ei8.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0xec]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 ec <unknown>
-
-vlxseg8ei16.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg8ei16.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0xec]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 ec <unknown>
-
-vlxseg8ei32.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg8ei32.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0xec]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 ec <unknown>
-
-vlxseg8ei64.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg8ei64.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0xec]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 ec <unknown>
-
-vlxseg8ei128.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg8ei128.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x04,0x45,0xfc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 45 fc <unknown>
-
-vlxseg8ei256.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg8ei256.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x54,0x45,0xfc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 45 fc <unknown>
-
-vlxseg8ei512.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg8ei512.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x64,0x45,0xfc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 45 fc <unknown>
-
-vlxseg8ei1024.v v8, (a0), v4, v0.t
-# CHECK-INST: vlxseg8ei1024.v v8, (a0), v4, v0.t
-# CHECK-ENCODING: [0x07,0x74,0x45,0xfc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 45 fc <unknown>
-
-vlseg2e8ff.v v8, (a0)
-# CHECK-INST: vlseg2e8ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x23]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 23 <unknown>
-
-vlseg2e16ff.v v8, (a0)
-# CHECK-INST: vlseg2e16ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x23]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 23 <unknown>
-
-vlseg2e32ff.v v8, (a0)
-# CHECK-INST: vlseg2e32ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x23]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 23 <unknown>
-
-vlseg2e64ff.v v8, (a0)
-# CHECK-INST: vlseg2e64ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x23]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 23 <unknown>
-
-vlseg2e128ff.v v8, (a0)
-# CHECK-INST: vlseg2e128ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x33]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 33 <unknown>
-
-vlseg2e256ff.v v8, (a0)
-# CHECK-INST: vlseg2e256ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x33]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 33 <unknown>
-
-vlseg2e512ff.v v8, (a0)
-# CHECK-INST: vlseg2e512ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x33]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 33 <unknown>
-
-vlseg2e1024ff.v v8, (a0)
-# CHECK-INST: vlseg2e1024ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x33]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 33 <unknown>
-
-vlseg2e8ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e8ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x21]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 21 <unknown>
-
-vlseg2e16ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e16ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x21]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 21 <unknown>
-
-vlseg2e32ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e32ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x21]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 21 <unknown>
-
-vlseg2e64ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e64ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x21]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 21 <unknown>
-
-vlseg2e128ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e128ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x31]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 31 <unknown>
-
-vlseg2e256ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e256ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x31]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 31 <unknown>
-
-vlseg2e512ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e512ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x31]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 31 <unknown>
-
-vlseg2e1024ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg2e1024ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x31]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 31 <unknown>
-
-vlseg3e8ff.v v8, (a0)
-# CHECK-INST: vlseg3e8ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x43]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 43 <unknown>
-
-vlseg3e16ff.v v8, (a0)
-# CHECK-INST: vlseg3e16ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x43]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 43 <unknown>
-
-vlseg3e32ff.v v8, (a0)
-# CHECK-INST: vlseg3e32ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x43]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 43 <unknown>
-
-vlseg3e64ff.v v8, (a0)
-# CHECK-INST: vlseg3e64ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x43]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 43 <unknown>
-
-vlseg3e128ff.v v8, (a0)
-# CHECK-INST: vlseg3e128ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x53]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 53 <unknown>
-
-vlseg3e256ff.v v8, (a0)
-# CHECK-INST: vlseg3e256ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x53]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 53 <unknown>
-
-vlseg3e512ff.v v8, (a0)
-# CHECK-INST: vlseg3e512ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x53]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 53 <unknown>
-
-vlseg3e1024ff.v v8, (a0)
-# CHECK-INST: vlseg3e1024ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x53]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 53 <unknown>
-
-vlseg3e8ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e8ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x41]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 41 <unknown>
-
-vlseg3e16ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e16ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x41]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 41 <unknown>
-
-vlseg3e32ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e32ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x41]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 41 <unknown>
-
-vlseg3e64ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e64ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x41]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 41 <unknown>
-
-vlseg3e128ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e128ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x51]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 51 <unknown>
-
-vlseg3e256ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e256ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x51]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 51 <unknown>
-
-vlseg3e512ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e512ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x51]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 51 <unknown>
-
-vlseg3e1024ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg3e1024ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x51]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 51 <unknown>
-
-vlseg4e8ff.v v8, (a0)
-# CHECK-INST: vlseg4e8ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x63]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 63 <unknown>
-
-vlseg4e16ff.v v8, (a0)
-# CHECK-INST: vlseg4e16ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x63]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 63 <unknown>
-
-vlseg4e32ff.v v8, (a0)
-# CHECK-INST: vlseg4e32ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x63]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 63 <unknown>
-
-vlseg4e64ff.v v8, (a0)
-# CHECK-INST: vlseg4e64ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x63]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 63 <unknown>
-
-vlseg4e128ff.v v8, (a0)
-# CHECK-INST: vlseg4e128ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0x73]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 73 <unknown>
-
-vlseg4e256ff.v v8, (a0)
-# CHECK-INST: vlseg4e256ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0x73]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 73 <unknown>
-
-vlseg4e512ff.v v8, (a0)
-# CHECK-INST: vlseg4e512ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0x73]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 73 <unknown>
-
-vlseg4e1024ff.v v8, (a0)
-# CHECK-INST: vlseg4e1024ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0x73]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 73 <unknown>
-
-vlseg4e8ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e8ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x61]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 61 <unknown>
-
-vlseg4e16ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e16ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x61]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 61 <unknown>
-
-vlseg4e32ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e32ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x61]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 61 <unknown>
-
-vlseg4e64ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e64ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x61]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 61 <unknown>
-
-vlseg4e128ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e128ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x71]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 71 <unknown>
-
-vlseg4e256ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e256ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x71]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 71 <unknown>
-
-vlseg4e512ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e512ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x71]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 71 <unknown>
+vlseg5e1024.v v8, (a0)
+# CHECK-INST: vlseg5e1024.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0x92]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 92 <unknown>
-vlseg4e1024ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg4e1024ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x71]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 71 <unknown>
+vlseg5e8ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e8ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x81]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 81 <unknown>
vlseg5e8ff.v v8, (a0)
# CHECK-INST: vlseg5e8ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x04,0x05,0x83]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 04 05 83 <unknown>
+vlseg5e16ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e16ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x81]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 81 <unknown>
+
vlseg5e16ff.v v8, (a0)
# CHECK-INST: vlseg5e16ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x54,0x05,0x83]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 54 05 83 <unknown>
+vlseg5e32ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e32ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x81]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 81 <unknown>
+
vlseg5e32ff.v v8, (a0)
# CHECK-INST: vlseg5e32ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x64,0x05,0x83]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 64 05 83 <unknown>
+vlseg5e64ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e64ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x81]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 81 <unknown>
+
vlseg5e64ff.v v8, (a0)
# CHECK-INST: vlseg5e64ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x74,0x05,0x83]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 74 05 83 <unknown>
+vlseg5e128ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e128ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0x91]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 91 <unknown>
+
vlseg5e128ff.v v8, (a0)
# CHECK-INST: vlseg5e128ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x04,0x05,0x93]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 04 05 93 <unknown>
+vlseg5e256ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e256ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0x91]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 91 <unknown>
+
vlseg5e256ff.v v8, (a0)
# CHECK-INST: vlseg5e256ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x54,0x05,0x93]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 54 05 93 <unknown>
+vlseg5e512ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e512ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0x91]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 91 <unknown>
+
vlseg5e512ff.v v8, (a0)
# CHECK-INST: vlseg5e512ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x64,0x05,0x93]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 64 05 93 <unknown>
+vlseg5e1024ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg5e1024ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0x91]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 91 <unknown>
+
vlseg5e1024ff.v v8, (a0)
# CHECK-INST: vlseg5e1024ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x74,0x05,0x93]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 74 05 93 <unknown>
-vlseg5e8ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e8ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x81]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 81 <unknown>
+vlsseg5e8.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg5e8.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x88]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 88 <unknown>
-vlseg5e16ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e16ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x81]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 81 <unknown>
+vlsseg5e8.v v8, (a0), a1
+# CHECK-INST: vlsseg5e8.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x8a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 8a <unknown>
-vlseg5e32ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e32ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x81]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 81 <unknown>
+vlsseg5e16.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg5e16.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x88]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 88 <unknown>
-vlseg5e64ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e64ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x81]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 81 <unknown>
+vlsseg5e16.v v8, (a0), a1
+# CHECK-INST: vlsseg5e16.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x8a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 8a <unknown>
-vlseg5e128ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e128ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0x91]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 91 <unknown>
+vlsseg5e32.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg5e32.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x88]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 88 <unknown>
+
+vlsseg5e32.v v8, (a0), a1
+# CHECK-INST: vlsseg5e32.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x8a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 8a <unknown>
+
+vlsseg5e64.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg5e64.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x88]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 88 <unknown>
+
+vlsseg5e64.v v8, (a0), a1
+# CHECK-INST: vlsseg5e64.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x8a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 8a <unknown>
+
+vlsseg5e128.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg5e128.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x98]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 98 <unknown>
+
+vlsseg5e128.v v8, (a0), a1
+# CHECK-INST: vlsseg5e128.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0x9a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 9a <unknown>
+
+vlsseg5e256.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg5e256.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x98]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 98 <unknown>
+
+vlsseg5e256.v v8, (a0), a1
+# CHECK-INST: vlsseg5e256.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0x9a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 9a <unknown>
+
+vlsseg5e512.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg5e512.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x98]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 98 <unknown>
+
+vlsseg5e512.v v8, (a0), a1
+# CHECK-INST: vlsseg5e512.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0x9a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 9a <unknown>
+
+vlsseg5e1024.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg5e1024.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x98]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 98 <unknown>
+
+vlsseg5e1024.v v8, (a0), a1
+# CHECK-INST: vlsseg5e1024.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0x9a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 9a <unknown>
+
+vluxseg5ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg5ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0x84]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 84 <unknown>
+
+vluxseg5ei8.v v8, (a0), v4
+# CHECK-INST: vluxseg5ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0x86]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 86 <unknown>
+
+vluxseg5ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg5ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0x84]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 84 <unknown>
+
+vluxseg5ei16.v v8, (a0), v4
+# CHECK-INST: vluxseg5ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0x86]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 86 <unknown>
+
+vluxseg5ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg5ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0x84]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 84 <unknown>
+
+vluxseg5ei32.v v8, (a0), v4
+# CHECK-INST: vluxseg5ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0x86]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 86 <unknown>
+
+vluxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0x84]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 84 <unknown>
+
+vluxseg5ei64.v v8, (a0), v4
+# CHECK-INST: vluxseg5ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0x86]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 86 <unknown>
+
+vloxseg5ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg5ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0x8c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 8c <unknown>
+
+vloxseg5ei8.v v8, (a0), v4
+# CHECK-INST: vloxseg5ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0x8e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 8e <unknown>
+
+vloxseg5ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg5ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0x8c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 8c <unknown>
+
+vloxseg5ei16.v v8, (a0), v4
+# CHECK-INST: vloxseg5ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0x8e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 8e <unknown>
+
+vloxseg5ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg5ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0x8c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 8c <unknown>
+
+vloxseg5ei32.v v8, (a0), v4
+# CHECK-INST: vloxseg5ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0x8e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 8e <unknown>
+
+vloxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0x8c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 8c <unknown>
+
+vloxseg5ei64.v v8, (a0), v4
+# CHECK-INST: vloxseg5ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0x8e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 8e <unknown>
+
+vlseg6e8.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e8.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0xa0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 a0 <unknown>
+
+vlseg6e8.v v8, (a0)
+# CHECK-INST: vlseg6e8.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0xa2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 a2 <unknown>
+
+vlseg6e16.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e16.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0xa0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 a0 <unknown>
+
+vlseg6e16.v v8, (a0)
+# CHECK-INST: vlseg6e16.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0xa2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 a2 <unknown>
+
+vlseg6e32.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e32.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0xa0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 a0 <unknown>
+
+vlseg6e32.v v8, (a0)
+# CHECK-INST: vlseg6e32.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0xa2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 a2 <unknown>
+
+vlseg6e64.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e64.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0xa0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 a0 <unknown>
+
+vlseg6e64.v v8, (a0)
+# CHECK-INST: vlseg6e64.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0xa2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 a2 <unknown>
+
+vlseg6e128.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e128.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0xb0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 b0 <unknown>
+
+vlseg6e128.v v8, (a0)
+# CHECK-INST: vlseg6e128.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0xb2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 b2 <unknown>
+
+vlseg6e256.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e256.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0xb0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 b0 <unknown>
+
+vlseg6e256.v v8, (a0)
+# CHECK-INST: vlseg6e256.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0xb2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 b2 <unknown>
+
+vlseg6e512.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e512.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0xb0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 b0 <unknown>
-vlseg5e256ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e256ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0x91]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 91 <unknown>
+vlseg6e512.v v8, (a0)
+# CHECK-INST: vlseg6e512.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0xb2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 b2 <unknown>
-vlseg5e512ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e512ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0x91]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 91 <unknown>
+vlseg6e1024.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e1024.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0xb0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 b0 <unknown>
-vlseg5e1024ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg5e1024ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0x91]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 91 <unknown>
+vlseg6e1024.v v8, (a0)
+# CHECK-INST: vlseg6e1024.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0xb2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 b2 <unknown>
+
+vlseg6e8ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e8ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0xa1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 a1 <unknown>
vlseg6e8ff.v v8, (a0)
# CHECK-INST: vlseg6e8ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x04,0x05,0xa3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 04 05 a3 <unknown>
+vlseg6e16ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e16ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0xa1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 a1 <unknown>
+
vlseg6e16ff.v v8, (a0)
# CHECK-INST: vlseg6e16ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x54,0x05,0xa3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 54 05 a3 <unknown>
+vlseg6e32ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e32ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0xa1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 a1 <unknown>
+
vlseg6e32ff.v v8, (a0)
# CHECK-INST: vlseg6e32ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x64,0x05,0xa3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 64 05 a3 <unknown>
+vlseg6e64ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e64ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0xa1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 a1 <unknown>
+
vlseg6e64ff.v v8, (a0)
# CHECK-INST: vlseg6e64ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x74,0x05,0xa3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 74 05 a3 <unknown>
+vlseg6e128ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e128ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0xb1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 b1 <unknown>
+
vlseg6e128ff.v v8, (a0)
# CHECK-INST: vlseg6e128ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x04,0x05,0xb3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 04 05 b3 <unknown>
+vlseg6e256ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e256ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0xb1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 b1 <unknown>
+
vlseg6e256ff.v v8, (a0)
# CHECK-INST: vlseg6e256ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x54,0x05,0xb3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 54 05 b3 <unknown>
+vlseg6e512ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e512ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0xb1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 b1 <unknown>
+
vlseg6e512ff.v v8, (a0)
# CHECK-INST: vlseg6e512ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x64,0x05,0xb3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 64 05 b3 <unknown>
+vlseg6e1024ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg6e1024ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0xb1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 b1 <unknown>
+
vlseg6e1024ff.v v8, (a0)
# CHECK-INST: vlseg6e1024ff.v v8, (a0)
# CHECK-ENCODING: [0x07,0x74,0x05,0xb3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 74 05 b3 <unknown>
-vlseg6e8ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e8ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0xa1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 a1 <unknown>
+vlsseg6e8.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg6e8.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xa8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 a8 <unknown>
-vlseg6e16ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e16ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0xa1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 a1 <unknown>
+vlsseg6e8.v v8, (a0), a1
+# CHECK-INST: vlsseg6e8.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xaa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 aa <unknown>
-vlseg6e32ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e32ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0xa1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 a1 <unknown>
+vlsseg6e16.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg6e16.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xa8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 a8 <unknown>
-vlseg6e64ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e64ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0xa1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 a1 <unknown>
+vlsseg6e16.v v8, (a0), a1
+# CHECK-INST: vlsseg6e16.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xaa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 aa <unknown>
-vlseg6e128ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e128ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0xb1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 b1 <unknown>
+vlsseg6e32.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg6e32.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xa8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 a8 <unknown>
-vlseg6e256ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e256ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0xb1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 b1 <unknown>
+vlsseg6e32.v v8, (a0), a1
+# CHECK-INST: vlsseg6e32.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xaa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 aa <unknown>
-vlseg6e512ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e512ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0xb1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 b1 <unknown>
+vlsseg6e64.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg6e64.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xa8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 a8 <unknown>
-vlseg6e1024ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg6e1024ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0xb1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 b1 <unknown>
+vlsseg6e64.v v8, (a0), a1
+# CHECK-INST: vlsseg6e64.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xaa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 aa <unknown>
-vlseg7e8ff.v v8, (a0)
-# CHECK-INST: vlseg7e8ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0xc3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 c3 <unknown>
+vlsseg6e128.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg6e128.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xb8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 b8 <unknown>
-vlseg7e16ff.v v8, (a0)
-# CHECK-INST: vlseg7e16ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0xc3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 c3 <unknown>
+vlsseg6e128.v v8, (a0), a1
+# CHECK-INST: vlsseg6e128.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xba]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 ba <unknown>
-vlseg7e32ff.v v8, (a0)
-# CHECK-INST: vlseg7e32ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0xc3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 c3 <unknown>
+vlsseg6e256.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg6e256.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xb8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 b8 <unknown>
+
+vlsseg6e256.v v8, (a0), a1
+# CHECK-INST: vlsseg6e256.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xba]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 ba <unknown>
+
+vlsseg6e512.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg6e512.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xb8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 b8 <unknown>
+
+vlsseg6e512.v v8, (a0), a1
+# CHECK-INST: vlsseg6e512.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xba]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 ba <unknown>
+
+vlsseg6e1024.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg6e1024.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xb8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 b8 <unknown>
+
+vlsseg6e1024.v v8, (a0), a1
+# CHECK-INST: vlsseg6e1024.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xba]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 ba <unknown>
+
+vluxseg6ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg6ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0xa4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 a4 <unknown>
+
+vluxseg6ei8.v v8, (a0), v4
+# CHECK-INST: vluxseg6ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0xa6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 a6 <unknown>
+
+vluxseg6ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg6ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0xa4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 a4 <unknown>
+
+vluxseg6ei16.v v8, (a0), v4
+# CHECK-INST: vluxseg6ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0xa6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 a6 <unknown>
+
+vluxseg6ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg6ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0xa4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 a4 <unknown>
+
+vluxseg6ei32.v v8, (a0), v4
+# CHECK-INST: vluxseg6ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0xa6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 a6 <unknown>
+
+vluxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0xa4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 a4 <unknown>
+
+vluxseg6ei64.v v8, (a0), v4
+# CHECK-INST: vluxseg6ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0xa6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 a6 <unknown>
+
+vloxseg6ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg6ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0xac]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 ac <unknown>
+
+vloxseg6ei8.v v8, (a0), v4
+# CHECK-INST: vloxseg6ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0xae]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 ae <unknown>
+
+vloxseg6ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg6ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0xac]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 ac <unknown>
+
+vloxseg6ei16.v v8, (a0), v4
+# CHECK-INST: vloxseg6ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0xae]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 ae <unknown>
+
+vloxseg6ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg6ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0xac]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 ac <unknown>
+
+vloxseg6ei32.v v8, (a0), v4
+# CHECK-INST: vloxseg6ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0xae]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 ae <unknown>
+
+vloxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0xac]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 ac <unknown>
+
+vloxseg6ei64.v v8, (a0), v4
+# CHECK-INST: vloxseg6ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0xae]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 ae <unknown>
+
+vlseg7e8.v v8, (a0), v0.t
+# CHECK-INST: vlseg7e8.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0xc0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 c0 <unknown>
+
+vlseg7e8.v v8, (a0)
+# CHECK-INST: vlseg7e8.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0xc2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 c2 <unknown>
+
+vlseg7e16.v v8, (a0), v0.t
+# CHECK-INST: vlseg7e16.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0xc0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 c0 <unknown>
+
+vlseg7e16.v v8, (a0)
+# CHECK-INST: vlseg7e16.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0xc2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 c2 <unknown>
+
+vlseg7e32.v v8, (a0), v0.t
+# CHECK-INST: vlseg7e32.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0xc0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 c0 <unknown>
+
+vlseg7e32.v v8, (a0)
+# CHECK-INST: vlseg7e32.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0xc2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 c2 <unknown>
+
+vlseg7e64.v v8, (a0), v0.t
+# CHECK-INST: vlseg7e64.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0xc0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 c0 <unknown>
+
+vlseg7e64.v v8, (a0)
+# CHECK-INST: vlseg7e64.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0xc2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 c2 <unknown>
+
+vlseg7e128.v v8, (a0), v0.t
+# CHECK-INST: vlseg7e128.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0xd0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 d0 <unknown>
+
+vlseg7e128.v v8, (a0)
+# CHECK-INST: vlseg7e128.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0xd2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 d2 <unknown>
+
+vlseg7e256.v v8, (a0), v0.t
+# CHECK-INST: vlseg7e256.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0xd0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 d0 <unknown>
-vlseg7e64ff.v v8, (a0)
-# CHECK-INST: vlseg7e64ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0xc3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 c3 <unknown>
+vlseg7e256.v v8, (a0)
+# CHECK-INST: vlseg7e256.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0xd2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 d2 <unknown>
-vlseg7e128ff.v v8, (a0)
-# CHECK-INST: vlseg7e128ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0xd3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 d3 <unknown>
+vlseg7e512.v v8, (a0), v0.t
+# CHECK-INST: vlseg7e512.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0xd0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 d0 <unknown>
-vlseg7e256ff.v v8, (a0)
-# CHECK-INST: vlseg7e256ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0xd3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 d3 <unknown>
+vlseg7e512.v v8, (a0)
+# CHECK-INST: vlseg7e512.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0xd2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 d2 <unknown>
-vlseg7e512ff.v v8, (a0)
-# CHECK-INST: vlseg7e512ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0xd3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 d3 <unknown>
+vlseg7e1024.v v8, (a0), v0.t
+# CHECK-INST: vlseg7e1024.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0xd0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 d0 <unknown>
-vlseg7e1024ff.v v8, (a0)
-# CHECK-INST: vlseg7e1024ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0xd3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 d3 <unknown>
+vlseg7e1024.v v8, (a0)
+# CHECK-INST: vlseg7e1024.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0xd2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 d2 <unknown>
vlseg7e8ff.v v8, (a0), v0.t
# CHECK-INST: vlseg7e8ff.v v8, (a0), v0.t
# CHECK-ENCODING: [0x07,0x04,0x05,0xc1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 04 05 c1 <unknown>
+vlseg7e8ff.v v8, (a0)
+# CHECK-INST: vlseg7e8ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0xc3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 c3 <unknown>
+
vlseg7e16ff.v v8, (a0), v0.t
# CHECK-INST: vlseg7e16ff.v v8, (a0), v0.t
# CHECK-ENCODING: [0x07,0x54,0x05,0xc1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 54 05 c1 <unknown>
+vlseg7e16ff.v v8, (a0)
+# CHECK-INST: vlseg7e16ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0xc3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 c3 <unknown>
+
vlseg7e32ff.v v8, (a0), v0.t
# CHECK-INST: vlseg7e32ff.v v8, (a0), v0.t
# CHECK-ENCODING: [0x07,0x64,0x05,0xc1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 64 05 c1 <unknown>
+vlseg7e32ff.v v8, (a0)
+# CHECK-INST: vlseg7e32ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0xc3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 c3 <unknown>
+
vlseg7e64ff.v v8, (a0), v0.t
# CHECK-INST: vlseg7e64ff.v v8, (a0), v0.t
# CHECK-ENCODING: [0x07,0x74,0x05,0xc1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 74 05 c1 <unknown>
+vlseg7e64ff.v v8, (a0)
+# CHECK-INST: vlseg7e64ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0xc3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 c3 <unknown>
+
vlseg7e128ff.v v8, (a0), v0.t
# CHECK-INST: vlseg7e128ff.v v8, (a0), v0.t
# CHECK-ENCODING: [0x07,0x04,0x05,0xd1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 04 05 d1 <unknown>
+vlseg7e128ff.v v8, (a0)
+# CHECK-INST: vlseg7e128ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0xd3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 d3 <unknown>
+
vlseg7e256ff.v v8, (a0), v0.t
# CHECK-INST: vlseg7e256ff.v v8, (a0), v0.t
# CHECK-ENCODING: [0x07,0x54,0x05,0xd1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 07 54 05 d1 <unknown>
-vlseg7e512ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg7e512ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0xd1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 d1 <unknown>
-
-vlseg7e1024ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg7e1024ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0xd1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 d1 <unknown>
-
-vlseg8e8ff.v v8, (a0)
-# CHECK-INST: vlseg8e8ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0xe3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 e3 <unknown>
-
-vlseg8e16ff.v v8, (a0)
-# CHECK-INST: vlseg8e16ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0xe3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 e3 <unknown>
-
-vlseg8e32ff.v v8, (a0)
-# CHECK-INST: vlseg8e32ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0xe3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 e3 <unknown>
-
-vlseg8e64ff.v v8, (a0)
-# CHECK-INST: vlseg8e64ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0xe3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 e3 <unknown>
-
-vlseg8e128ff.v v8, (a0)
-# CHECK-INST: vlseg8e128ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x04,0x05,0xf3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 f3 <unknown>
-
-vlseg8e256ff.v v8, (a0)
-# CHECK-INST: vlseg8e256ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x54,0x05,0xf3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 f3 <unknown>
-
-vlseg8e512ff.v v8, (a0)
-# CHECK-INST: vlseg8e512ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x64,0x05,0xf3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 f3 <unknown>
-
-vlseg8e1024ff.v v8, (a0)
-# CHECK-INST: vlseg8e1024ff.v v8, (a0)
-# CHECK-ENCODING: [0x07,0x74,0x05,0xf3]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 f3 <unknown>
-
-vlseg8e8ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e8ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0xe1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 e1 <unknown>
-
-vlseg8e16ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e16ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0xe1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 e1 <unknown>
-
-vlseg8e32ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e32ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0xe1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 e1 <unknown>
-
-vlseg8e64ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e64ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0xe1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 e1 <unknown>
-
-vlseg8e128ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e128ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x04,0x05,0xf1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 04 05 f1 <unknown>
-
-vlseg8e256ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e256ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x54,0x05,0xf1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 54 05 f1 <unknown>
-
-vlseg8e512ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e512ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x64,0x05,0xf1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 64 05 f1 <unknown>
-
-vlseg8e1024ff.v v8, (a0), v0.t
-# CHECK-INST: vlseg8e1024ff.v v8, (a0), v0.t
-# CHECK-ENCODING: [0x07,0x74,0x05,0xf1]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 07 74 05 f1 <unknown>
-
-vsseg2e8.v v24, (a0)
-# CHECK-INST: vsseg2e8.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x22]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 22 <unknown>
-
-vsseg2e16.v v24, (a0)
-# CHECK-INST: vsseg2e16.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x22]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 22 <unknown>
-
-vsseg2e32.v v24, (a0)
-# CHECK-INST: vsseg2e32.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x22]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 22 <unknown>
-
-vsseg2e64.v v24, (a0)
-# CHECK-INST: vsseg2e64.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x22]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 22 <unknown>
-
-vsseg2e128.v v24, (a0)
-# CHECK-INST: vsseg2e128.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x32]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 32 <unknown>
-
-vsseg2e256.v v24, (a0)
-# CHECK-INST: vsseg2e256.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x32]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 32 <unknown>
-
-vsseg2e512.v v24, (a0)
-# CHECK-INST: vsseg2e512.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x32]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 32 <unknown>
-
-vsseg2e1024.v v24, (a0)
-# CHECK-INST: vsseg2e1024.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x32]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 32 <unknown>
-
-vsseg2e8.v v24, (a0), v0.t
-# CHECK-INST: vsseg2e8.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x20]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 20 <unknown>
-
-vsseg2e16.v v24, (a0), v0.t
-# CHECK-INST: vsseg2e16.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x20]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 20 <unknown>
-
-vsseg2e32.v v24, (a0), v0.t
-# CHECK-INST: vsseg2e32.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x20]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 20 <unknown>
-
-vsseg2e64.v v24, (a0), v0.t
-# CHECK-INST: vsseg2e64.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x20]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 20 <unknown>
-
-vsseg2e128.v v24, (a0), v0.t
-# CHECK-INST: vsseg2e128.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x30]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 30 <unknown>
-
-vsseg2e256.v v24, (a0), v0.t
-# CHECK-INST: vsseg2e256.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x30]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 30 <unknown>
-
-vsseg2e512.v v24, (a0), v0.t
-# CHECK-INST: vsseg2e512.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x30]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 30 <unknown>
-
-vsseg2e1024.v v24, (a0), v0.t
-# CHECK-INST: vsseg2e1024.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x30]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 30 <unknown>
-
-vsseg3e8.v v24, (a0)
-# CHECK-INST: vsseg3e8.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x42]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 42 <unknown>
-
-vsseg3e16.v v24, (a0)
-# CHECK-INST: vsseg3e16.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x42]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 42 <unknown>
-
-vsseg3e32.v v24, (a0)
-# CHECK-INST: vsseg3e32.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x42]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 42 <unknown>
-
-vsseg3e64.v v24, (a0)
-# CHECK-INST: vsseg3e64.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x42]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 42 <unknown>
-
-vsseg3e128.v v24, (a0)
-# CHECK-INST: vsseg3e128.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x52]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 52 <unknown>
+vlseg7e256ff.v v8, (a0)
+# CHECK-INST: vlseg7e256ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0xd3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 d3 <unknown>
-vsseg3e256.v v24, (a0)
-# CHECK-INST: vsseg3e256.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x52]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 52 <unknown>
+vlseg7e512ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg7e512ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0xd1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 d1 <unknown>
-vsseg3e512.v v24, (a0)
-# CHECK-INST: vsseg3e512.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x52]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 52 <unknown>
+vlseg7e512ff.v v8, (a0)
+# CHECK-INST: vlseg7e512ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0xd3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 d3 <unknown>
-vsseg3e1024.v v24, (a0)
-# CHECK-INST: vsseg3e1024.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x52]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 52 <unknown>
+vlseg7e1024ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg7e1024ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0xd1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 d1 <unknown>
-vsseg3e8.v v24, (a0), v0.t
-# CHECK-INST: vsseg3e8.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x40]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 40 <unknown>
+vlseg7e1024ff.v v8, (a0)
+# CHECK-INST: vlseg7e1024ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0xd3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 d3 <unknown>
-vsseg3e16.v v24, (a0), v0.t
-# CHECK-INST: vsseg3e16.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x40]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 40 <unknown>
+vlsseg7e8.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg7e8.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xc8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 c8 <unknown>
-vsseg3e32.v v24, (a0), v0.t
-# CHECK-INST: vsseg3e32.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x40]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 40 <unknown>
+vlsseg7e8.v v8, (a0), a1
+# CHECK-INST: vlsseg7e8.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xca]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 ca <unknown>
-vsseg3e64.v v24, (a0), v0.t
-# CHECK-INST: vsseg3e64.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x40]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 40 <unknown>
+vlsseg7e16.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg7e16.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xc8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 c8 <unknown>
-vsseg3e128.v v24, (a0), v0.t
-# CHECK-INST: vsseg3e128.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x50]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 50 <unknown>
+vlsseg7e16.v v8, (a0), a1
+# CHECK-INST: vlsseg7e16.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xca]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 ca <unknown>
-vsseg3e256.v v24, (a0), v0.t
-# CHECK-INST: vsseg3e256.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x50]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 50 <unknown>
+vlsseg7e32.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg7e32.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xc8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 c8 <unknown>
-vsseg3e512.v v24, (a0), v0.t
-# CHECK-INST: vsseg3e512.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x50]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 50 <unknown>
+vlsseg7e32.v v8, (a0), a1
+# CHECK-INST: vlsseg7e32.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xca]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 ca <unknown>
-vsseg3e1024.v v24, (a0), v0.t
-# CHECK-INST: vsseg3e1024.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x50]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 50 <unknown>
+vlsseg7e64.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg7e64.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xc8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 c8 <unknown>
-vsseg4e8.v v24, (a0)
-# CHECK-INST: vsseg4e8.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x62]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 62 <unknown>
+vlsseg7e64.v v8, (a0), a1
+# CHECK-INST: vlsseg7e64.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xca]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 ca <unknown>
-vsseg4e16.v v24, (a0)
-# CHECK-INST: vsseg4e16.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x62]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 62 <unknown>
+vlsseg7e128.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg7e128.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xd8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 d8 <unknown>
-vsseg4e32.v v24, (a0)
-# CHECK-INST: vsseg4e32.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x62]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 62 <unknown>
+vlsseg7e128.v v8, (a0), a1
+# CHECK-INST: vlsseg7e128.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xda]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 da <unknown>
-vsseg4e64.v v24, (a0)
-# CHECK-INST: vsseg4e64.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x62]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 62 <unknown>
+vlsseg7e256.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg7e256.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xd8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 d8 <unknown>
-vsseg4e128.v v24, (a0)
-# CHECK-INST: vsseg4e128.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x72]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 72 <unknown>
+vlsseg7e256.v v8, (a0), a1
+# CHECK-INST: vlsseg7e256.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xda]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 da <unknown>
-vsseg4e256.v v24, (a0)
-# CHECK-INST: vsseg4e256.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x72]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 72 <unknown>
+vlsseg7e512.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg7e512.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xd8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 d8 <unknown>
-vsseg4e512.v v24, (a0)
-# CHECK-INST: vsseg4e512.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x72]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 72 <unknown>
+vlsseg7e512.v v8, (a0), a1
+# CHECK-INST: vlsseg7e512.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xda]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 da <unknown>
-vsseg4e1024.v v24, (a0)
-# CHECK-INST: vsseg4e1024.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x72]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 72 <unknown>
+vlsseg7e1024.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg7e1024.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xd8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 d8 <unknown>
-vsseg4e8.v v24, (a0), v0.t
-# CHECK-INST: vsseg4e8.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x60]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 60 <unknown>
+vlsseg7e1024.v v8, (a0), a1
+# CHECK-INST: vlsseg7e1024.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xda]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 da <unknown>
-vsseg4e16.v v24, (a0), v0.t
-# CHECK-INST: vsseg4e16.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x60]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 60 <unknown>
+vluxseg7ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg7ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0xc4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 c4 <unknown>
+
+vluxseg7ei8.v v8, (a0), v4
+# CHECK-INST: vluxseg7ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0xc6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 c6 <unknown>
+
+vluxseg7ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg7ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0xc4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 c4 <unknown>
+
+vluxseg7ei16.v v8, (a0), v4
+# CHECK-INST: vluxseg7ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0xc6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 c6 <unknown>
+
+vluxseg7ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg7ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0xc4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 c4 <unknown>
+
+vluxseg7ei32.v v8, (a0), v4
+# CHECK-INST: vluxseg7ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0xc6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 c6 <unknown>
+
+vluxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0xc4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 c4 <unknown>
+
+vluxseg7ei64.v v8, (a0), v4
+# CHECK-INST: vluxseg7ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0xc6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 c6 <unknown>
+
+vloxseg7ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg7ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0xcc]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 cc <unknown>
-vsseg4e32.v v24, (a0), v0.t
-# CHECK-INST: vsseg4e32.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x60]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 60 <unknown>
+vloxseg7ei8.v v8, (a0), v4
+# CHECK-INST: vloxseg7ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0xce]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 ce <unknown>
-vsseg4e64.v v24, (a0), v0.t
-# CHECK-INST: vsseg4e64.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x60]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 60 <unknown>
+vloxseg7ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg7ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0xcc]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 cc <unknown>
-vsseg4e128.v v24, (a0), v0.t
-# CHECK-INST: vsseg4e128.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x70]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 70 <unknown>
+vloxseg7ei16.v v8, (a0), v4
+# CHECK-INST: vloxseg7ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0xce]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 ce <unknown>
-vsseg4e256.v v24, (a0), v0.t
-# CHECK-INST: vsseg4e256.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x70]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 70 <unknown>
+vloxseg7ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg7ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0xcc]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 cc <unknown>
-vsseg4e512.v v24, (a0), v0.t
-# CHECK-INST: vsseg4e512.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x70]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 70 <unknown>
+vloxseg7ei32.v v8, (a0), v4
+# CHECK-INST: vloxseg7ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0xce]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 ce <unknown>
-vsseg4e1024.v v24, (a0), v0.t
-# CHECK-INST: vsseg4e1024.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x70]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 70 <unknown>
+vloxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0xcc]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 cc <unknown>
-vsseg5e8.v v24, (a0)
-# CHECK-INST: vsseg5e8.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x82]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 82 <unknown>
+vloxseg7ei64.v v8, (a0), v4
+# CHECK-INST: vloxseg7ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0xce]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 ce <unknown>
-vsseg5e16.v v24, (a0)
-# CHECK-INST: vsseg5e16.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x82]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 82 <unknown>
+vlseg8e8.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e8.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0xe0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 e0 <unknown>
-vsseg5e32.v v24, (a0)
-# CHECK-INST: vsseg5e32.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x82]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 82 <unknown>
+vlseg8e8.v v8, (a0)
+# CHECK-INST: vlseg8e8.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0xe2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 e2 <unknown>
-vsseg5e64.v v24, (a0)
-# CHECK-INST: vsseg5e64.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x82]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 82 <unknown>
+vlseg8e16.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e16.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0xe0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 e0 <unknown>
-vsseg5e128.v v24, (a0)
-# CHECK-INST: vsseg5e128.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x92]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 92 <unknown>
+vlseg8e16.v v8, (a0)
+# CHECK-INST: vlseg8e16.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0xe2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 e2 <unknown>
-vsseg5e256.v v24, (a0)
-# CHECK-INST: vsseg5e256.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x92]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 92 <unknown>
+vlseg8e32.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e32.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0xe0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 e0 <unknown>
-vsseg5e512.v v24, (a0)
-# CHECK-INST: vsseg5e512.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x92]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 92 <unknown>
+vlseg8e32.v v8, (a0)
+# CHECK-INST: vlseg8e32.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0xe2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 e2 <unknown>
-vsseg5e1024.v v24, (a0)
-# CHECK-INST: vsseg5e1024.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x92]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 92 <unknown>
+vlseg8e64.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e64.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0xe0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 e0 <unknown>
-vsseg5e8.v v24, (a0), v0.t
-# CHECK-INST: vsseg5e8.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x80]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 80 <unknown>
+vlseg8e64.v v8, (a0)
+# CHECK-INST: vlseg8e64.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0xe2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 e2 <unknown>
-vsseg5e16.v v24, (a0), v0.t
-# CHECK-INST: vsseg5e16.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x80]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 80 <unknown>
+vlseg8e128.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e128.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0xf0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 f0 <unknown>
-vsseg5e32.v v24, (a0), v0.t
-# CHECK-INST: vsseg5e32.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x80]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 80 <unknown>
+vlseg8e128.v v8, (a0)
+# CHECK-INST: vlseg8e128.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0xf2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 f2 <unknown>
-vsseg5e64.v v24, (a0), v0.t
-# CHECK-INST: vsseg5e64.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x80]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 80 <unknown>
+vlseg8e256.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e256.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0xf0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 f0 <unknown>
-vsseg5e128.v v24, (a0), v0.t
-# CHECK-INST: vsseg5e128.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0x90]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 90 <unknown>
+vlseg8e256.v v8, (a0)
+# CHECK-INST: vlseg8e256.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0xf2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 f2 <unknown>
-vsseg5e256.v v24, (a0), v0.t
-# CHECK-INST: vsseg5e256.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0x90]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 90 <unknown>
+vlseg8e512.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e512.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0xf0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 f0 <unknown>
-vsseg5e512.v v24, (a0), v0.t
-# CHECK-INST: vsseg5e512.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0x90]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 90 <unknown>
+vlseg8e512.v v8, (a0)
+# CHECK-INST: vlseg8e512.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0xf2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 f2 <unknown>
-vsseg5e1024.v v24, (a0), v0.t
-# CHECK-INST: vsseg5e1024.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0x90]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 90 <unknown>
+vlseg8e1024.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e1024.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0xf0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 f0 <unknown>
-vsseg6e8.v v24, (a0)
-# CHECK-INST: vsseg6e8.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xa2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 a2 <unknown>
+vlseg8e1024.v v8, (a0)
+# CHECK-INST: vlseg8e1024.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0xf2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 f2 <unknown>
-vsseg6e16.v v24, (a0)
-# CHECK-INST: vsseg6e16.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xa2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 a2 <unknown>
+vlseg8e8ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e8ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0xe1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 e1 <unknown>
-vsseg6e32.v v24, (a0)
-# CHECK-INST: vsseg6e32.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xa2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 a2 <unknown>
+vlseg8e8ff.v v8, (a0)
+# CHECK-INST: vlseg8e8ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0xe3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 e3 <unknown>
-vsseg6e64.v v24, (a0)
-# CHECK-INST: vsseg6e64.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xa2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 a2 <unknown>
+vlseg8e16ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e16ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0xe1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 e1 <unknown>
-vsseg6e128.v v24, (a0)
-# CHECK-INST: vsseg6e128.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xb2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 b2 <unknown>
+vlseg8e16ff.v v8, (a0)
+# CHECK-INST: vlseg8e16ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0xe3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 e3 <unknown>
-vsseg6e256.v v24, (a0)
-# CHECK-INST: vsseg6e256.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xb2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 b2 <unknown>
+vlseg8e32ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e32ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0xe1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 e1 <unknown>
-vsseg6e512.v v24, (a0)
-# CHECK-INST: vsseg6e512.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xb2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 b2 <unknown>
+vlseg8e32ff.v v8, (a0)
+# CHECK-INST: vlseg8e32ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0xe3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 e3 <unknown>
-vsseg6e1024.v v24, (a0)
-# CHECK-INST: vsseg6e1024.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xb2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 b2 <unknown>
+vlseg8e64ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e64ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0xe1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 e1 <unknown>
-vsseg6e8.v v24, (a0), v0.t
-# CHECK-INST: vsseg6e8.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xa0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 a0 <unknown>
+vlseg8e64ff.v v8, (a0)
+# CHECK-INST: vlseg8e64ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0xe3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 e3 <unknown>
-vsseg6e16.v v24, (a0), v0.t
-# CHECK-INST: vsseg6e16.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xa0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 a0 <unknown>
+vlseg8e128ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e128ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x04,0x05,0xf1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 f1 <unknown>
-vsseg6e32.v v24, (a0), v0.t
-# CHECK-INST: vsseg6e32.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xa0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 a0 <unknown>
+vlseg8e128ff.v v8, (a0)
+# CHECK-INST: vlseg8e128ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x04,0x05,0xf3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 05 f3 <unknown>
-vsseg6e64.v v24, (a0), v0.t
-# CHECK-INST: vsseg6e64.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xa0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 a0 <unknown>
+vlseg8e256ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e256ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x54,0x05,0xf1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 f1 <unknown>
-vsseg6e128.v v24, (a0), v0.t
-# CHECK-INST: vsseg6e128.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xb0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 b0 <unknown>
+vlseg8e256ff.v v8, (a0)
+# CHECK-INST: vlseg8e256ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x54,0x05,0xf3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 05 f3 <unknown>
-vsseg6e256.v v24, (a0), v0.t
-# CHECK-INST: vsseg6e256.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xb0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 b0 <unknown>
+vlseg8e512ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e512ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x64,0x05,0xf1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 f1 <unknown>
-vsseg6e512.v v24, (a0), v0.t
-# CHECK-INST: vsseg6e512.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xb0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 b0 <unknown>
+vlseg8e512ff.v v8, (a0)
+# CHECK-INST: vlseg8e512ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x64,0x05,0xf3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 05 f3 <unknown>
-vsseg6e1024.v v24, (a0), v0.t
-# CHECK-INST: vsseg6e1024.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xb0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 b0 <unknown>
+vlseg8e1024ff.v v8, (a0), v0.t
+# CHECK-INST: vlseg8e1024ff.v v8, (a0), v0.t
+# CHECK-ENCODING: [0x07,0x74,0x05,0xf1]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 f1 <unknown>
-vsseg7e8.v v24, (a0)
-# CHECK-INST: vsseg7e8.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xc2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 c2 <unknown>
+vlseg8e1024ff.v v8, (a0)
+# CHECK-INST: vlseg8e1024ff.v v8, (a0)
+# CHECK-ENCODING: [0x07,0x74,0x05,0xf3]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 05 f3 <unknown>
-vsseg7e16.v v24, (a0)
-# CHECK-INST: vsseg7e16.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xc2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 c2 <unknown>
+vlsseg8e8.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg8e8.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xe8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 e8 <unknown>
-vsseg7e32.v v24, (a0)
-# CHECK-INST: vsseg7e32.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xc2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 c2 <unknown>
+vlsseg8e8.v v8, (a0), a1
+# CHECK-INST: vlsseg8e8.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xea]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 ea <unknown>
-vsseg7e64.v v24, (a0)
-# CHECK-INST: vsseg7e64.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xc2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 c2 <unknown>
+vlsseg8e16.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg8e16.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xe8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 e8 <unknown>
-vsseg7e128.v v24, (a0)
-# CHECK-INST: vsseg7e128.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xd2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 d2 <unknown>
+vlsseg8e16.v v8, (a0), a1
+# CHECK-INST: vlsseg8e16.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xea]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 ea <unknown>
-vsseg7e256.v v24, (a0)
-# CHECK-INST: vsseg7e256.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xd2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 d2 <unknown>
+vlsseg8e32.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg8e32.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xe8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 e8 <unknown>
-vsseg7e512.v v24, (a0)
-# CHECK-INST: vsseg7e512.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xd2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 d2 <unknown>
+vlsseg8e32.v v8, (a0), a1
+# CHECK-INST: vlsseg8e32.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xea]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 ea <unknown>
-vsseg7e1024.v v24, (a0)
-# CHECK-INST: vsseg7e1024.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xd2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 d2 <unknown>
+vlsseg8e64.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg8e64.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xe8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 e8 <unknown>
-vsseg7e8.v v24, (a0), v0.t
-# CHECK-INST: vsseg7e8.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xc0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 c0 <unknown>
+vlsseg8e64.v v8, (a0), a1
+# CHECK-INST: vlsseg8e64.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xea]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 ea <unknown>
-vsseg7e16.v v24, (a0), v0.t
-# CHECK-INST: vsseg7e16.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xc0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 c0 <unknown>
+vlsseg8e128.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg8e128.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xf8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 f8 <unknown>
-vsseg7e32.v v24, (a0), v0.t
-# CHECK-INST: vsseg7e32.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xc0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 c0 <unknown>
+vlsseg8e128.v v8, (a0), a1
+# CHECK-INST: vlsseg8e128.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x04,0xb5,0xfa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 b5 fa <unknown>
-vsseg7e64.v v24, (a0), v0.t
-# CHECK-INST: vsseg7e64.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xc0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 c0 <unknown>
+vlsseg8e256.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg8e256.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xf8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 f8 <unknown>
-vsseg7e128.v v24, (a0), v0.t
-# CHECK-INST: vsseg7e128.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xd0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 d0 <unknown>
+vlsseg8e256.v v8, (a0), a1
+# CHECK-INST: vlsseg8e256.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x54,0xb5,0xfa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 b5 fa <unknown>
-vsseg7e256.v v24, (a0), v0.t
-# CHECK-INST: vsseg7e256.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xd0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 d0 <unknown>
+vlsseg8e512.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg8e512.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xf8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 f8 <unknown>
-vsseg7e512.v v24, (a0), v0.t
-# CHECK-INST: vsseg7e512.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xd0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 d0 <unknown>
+vlsseg8e512.v v8, (a0), a1
+# CHECK-INST: vlsseg8e512.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x64,0xb5,0xfa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 b5 fa <unknown>
-vsseg7e1024.v v24, (a0), v0.t
-# CHECK-INST: vsseg7e1024.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xd0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 d0 <unknown>
+vlsseg8e1024.v v8, (a0), a1, v0.t
+# CHECK-INST: vlsseg8e1024.v v8, (a0), a1, v0.t
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xf8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 f8 <unknown>
-vsseg8e8.v v24, (a0)
-# CHECK-INST: vsseg8e8.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xe2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 e2 <unknown>
+vlsseg8e1024.v v8, (a0), a1
+# CHECK-INST: vlsseg8e1024.v v8, (a0), a1
+# CHECK-ENCODING: [0x07,0x74,0xb5,0xfa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 b5 fa <unknown>
-vsseg8e16.v v24, (a0)
-# CHECK-INST: vsseg8e16.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xe2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 e2 <unknown>
+vluxseg8ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg8ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0xe4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 e4 <unknown>
+
+vluxseg8ei8.v v8, (a0), v4
+# CHECK-INST: vluxseg8ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0xe6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 e6 <unknown>
+
+vluxseg8ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg8ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0xe4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 e4 <unknown>
+
+vluxseg8ei16.v v8, (a0), v4
+# CHECK-INST: vluxseg8ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0xe6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 e6 <unknown>
+
+vluxseg8ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg8ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0xe4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 e4 <unknown>
+
+vluxseg8ei32.v v8, (a0), v4
+# CHECK-INST: vluxseg8ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0xe6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 e6 <unknown>
+
+vluxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vluxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0xe4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 e4 <unknown>
+
+vluxseg8ei64.v v8, (a0), v4
+# CHECK-INST: vluxseg8ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0xe6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 e6 <unknown>
+
+vloxseg8ei8.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg8ei8.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x04,0x45,0xec]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 ec <unknown>
-vsseg8e32.v v24, (a0)
-# CHECK-INST: vsseg8e32.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xe2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 e2 <unknown>
+vloxseg8ei8.v v8, (a0), v4
+# CHECK-INST: vloxseg8ei8.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x04,0x45,0xee]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 04 45 ee <unknown>
-vsseg8e64.v v24, (a0)
-# CHECK-INST: vsseg8e64.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xe2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 e2 <unknown>
+vloxseg8ei16.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg8ei16.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x54,0x45,0xec]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 ec <unknown>
-vsseg8e128.v v24, (a0)
-# CHECK-INST: vsseg8e128.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xf2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 f2 <unknown>
+vloxseg8ei16.v v8, (a0), v4
+# CHECK-INST: vloxseg8ei16.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x54,0x45,0xee]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 54 45 ee <unknown>
-vsseg8e256.v v24, (a0)
-# CHECK-INST: vsseg8e256.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xf2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 f2 <unknown>
+vloxseg8ei32.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg8ei32.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x64,0x45,0xec]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 ec <unknown>
-vsseg8e512.v v24, (a0)
-# CHECK-INST: vsseg8e512.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xf2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 f2 <unknown>
+vloxseg8ei32.v v8, (a0), v4
+# CHECK-INST: vloxseg8ei32.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x64,0x45,0xee]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 64 45 ee <unknown>
-vsseg8e1024.v v24, (a0)
-# CHECK-INST: vsseg8e1024.v v24, (a0)
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xf2]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 f2 <unknown>
+vloxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-INST: vloxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ENCODING: [0x07,0x74,0x45,0xec]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 ec <unknown>
-vsseg8e8.v v24, (a0), v0.t
-# CHECK-INST: vsseg8e8.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xe0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 e0 <unknown>
+vloxseg8ei64.v v8, (a0), v4
+# CHECK-INST: vloxseg8ei64.v v8, (a0), v4
+# CHECK-ENCODING: [0x07,0x74,0x45,0xee]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 07 74 45 ee <unknown>
-vsseg8e16.v v24, (a0), v0.t
-# CHECK-INST: vsseg8e16.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xe0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 e0 <unknown>
+vsseg2e8.v v24, (a0), v0.t
+# CHECK-INST: vsseg2e8.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x20]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 20 <unknown>
-vsseg8e32.v v24, (a0), v0.t
-# CHECK-INST: vsseg8e32.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xe0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 e0 <unknown>
+vsseg2e8.v v24, (a0)
+# CHECK-INST: vsseg2e8.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x22]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 22 <unknown>
-vsseg8e64.v v24, (a0), v0.t
-# CHECK-INST: vsseg8e64.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xe0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 e0 <unknown>
+vsseg2e16.v v24, (a0), v0.t
+# CHECK-INST: vsseg2e16.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x20]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 20 <unknown>
-vsseg8e128.v v24, (a0), v0.t
-# CHECK-INST: vsseg8e128.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x05,0xf0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 05 f0 <unknown>
+vsseg2e16.v v24, (a0)
+# CHECK-INST: vsseg2e16.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x22]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 22 <unknown>
-vsseg8e256.v v24, (a0), v0.t
-# CHECK-INST: vsseg8e256.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x05,0xf0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 05 f0 <unknown>
+vsseg2e32.v v24, (a0), v0.t
+# CHECK-INST: vsseg2e32.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x20]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 20 <unknown>
-vsseg8e512.v v24, (a0), v0.t
-# CHECK-INST: vsseg8e512.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x05,0xf0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 05 f0 <unknown>
+vsseg2e32.v v24, (a0)
+# CHECK-INST: vsseg2e32.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x22]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 22 <unknown>
-vsseg8e1024.v v24, (a0), v0.t
-# CHECK-INST: vsseg8e1024.v v24, (a0), v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x05,0xf0]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 05 f0 <unknown>
+vsseg2e64.v v24, (a0), v0.t
+# CHECK-INST: vsseg2e64.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x20]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 20 <unknown>
-vssseg2e8.v v24, (a0), a1
-# CHECK-INST: vssseg2e8.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0x2a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 2a <unknown>
+vsseg2e64.v v24, (a0)
+# CHECK-INST: vsseg2e64.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x22]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 22 <unknown>
-vssseg2e16.v v24, (a0), a1
-# CHECK-INST: vssseg2e16.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0x2a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 2a <unknown>
+vsseg2e128.v v24, (a0), v0.t
+# CHECK-INST: vsseg2e128.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x30]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 30 <unknown>
-vssseg2e32.v v24, (a0), a1
-# CHECK-INST: vssseg2e32.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0x2a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 2a <unknown>
+vsseg2e128.v v24, (a0)
+# CHECK-INST: vsseg2e128.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x32]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 32 <unknown>
-vssseg2e64.v v24, (a0), a1
-# CHECK-INST: vssseg2e64.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0x2a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 2a <unknown>
+vsseg2e256.v v24, (a0), v0.t
+# CHECK-INST: vsseg2e256.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x30]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 30 <unknown>
-vssseg2e128.v v24, (a0), a1
-# CHECK-INST: vssseg2e128.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0x3a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 3a <unknown>
+vsseg2e256.v v24, (a0)
+# CHECK-INST: vsseg2e256.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x32]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 32 <unknown>
-vssseg2e256.v v24, (a0), a1
-# CHECK-INST: vssseg2e256.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0x3a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 3a <unknown>
+vsseg2e512.v v24, (a0), v0.t
+# CHECK-INST: vsseg2e512.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x30]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 30 <unknown>
-vssseg2e512.v v24, (a0), a1
-# CHECK-INST: vssseg2e512.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0x3a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 3a <unknown>
+vsseg2e512.v v24, (a0)
+# CHECK-INST: vsseg2e512.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x32]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 32 <unknown>
-vssseg2e1024.v v24, (a0), a1
-# CHECK-INST: vssseg2e1024.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0x3a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 3a <unknown>
+vsseg2e1024.v v24, (a0), v0.t
+# CHECK-INST: vsseg2e1024.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x30]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 30 <unknown>
+
+vsseg2e1024.v v24, (a0)
+# CHECK-INST: vsseg2e1024.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x32]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 32 <unknown>
vssseg2e8.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg2e8.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x0c,0xb5,0x28]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 28 <unknown>
+vssseg2e8.v v24, (a0), a1
+# CHECK-INST: vssseg2e8.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0x2a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 2a <unknown>
+
vssseg2e16.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg2e16.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x5c,0xb5,0x28]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 28 <unknown>
+vssseg2e16.v v24, (a0), a1
+# CHECK-INST: vssseg2e16.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0x2a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 2a <unknown>
+
vssseg2e32.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg2e32.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x6c,0xb5,0x28]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 28 <unknown>
+vssseg2e32.v v24, (a0), a1
+# CHECK-INST: vssseg2e32.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0x2a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 2a <unknown>
+
vssseg2e64.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg2e64.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x7c,0xb5,0x28]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c b5 28 <unknown>
+vssseg2e64.v v24, (a0), a1
+# CHECK-INST: vssseg2e64.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0x2a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 2a <unknown>
+
vssseg2e128.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg2e128.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x0c,0xb5,0x38]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 38 <unknown>
+vssseg2e128.v v24, (a0), a1
+# CHECK-INST: vssseg2e128.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0x3a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 3a <unknown>
+
vssseg2e256.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg2e256.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x5c,0xb5,0x38]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 38 <unknown>
+vssseg2e256.v v24, (a0), a1
+# CHECK-INST: vssseg2e256.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0x3a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 3a <unknown>
+
vssseg2e512.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg2e512.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x6c,0xb5,0x38]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 38 <unknown>
+vssseg2e512.v v24, (a0), a1
+# CHECK-INST: vssseg2e512.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0x3a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 3a <unknown>
+
vssseg2e1024.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg2e1024.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x7c,0xb5,0x38]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c b5 38 <unknown>
+vssseg2e1024.v v24, (a0), a1
+# CHECK-INST: vssseg2e1024.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0x3a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 3a <unknown>
+
+vsuxseg2ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg2ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x24]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 24 <unknown>
+
+vsuxseg2ei8.v v24, (a0), v4
+# CHECK-INST: vsuxseg2ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x26]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 26 <unknown>
+
+vsuxseg2ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg2ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x24]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 24 <unknown>
+
+vsuxseg2ei16.v v24, (a0), v4
+# CHECK-INST: vsuxseg2ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x26]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 26 <unknown>
+
+vsuxseg2ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg2ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x24]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 24 <unknown>
+
+vsuxseg2ei32.v v24, (a0), v4
+# CHECK-INST: vsuxseg2ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x26]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 26 <unknown>
+
+vsuxseg2ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg2ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x24]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 24 <unknown>
+
+vsuxseg2ei64.v v24, (a0), v4
+# CHECK-INST: vsuxseg2ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x26]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 26 <unknown>
+
+vsoxseg2ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg2ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x2c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 2c <unknown>
+
+vsoxseg2ei8.v v24, (a0), v4
+# CHECK-INST: vsoxseg2ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x2e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 2e <unknown>
+
+vsoxseg2ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg2ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x2c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 2c <unknown>
+
+vsoxseg2ei16.v v24, (a0), v4
+# CHECK-INST: vsoxseg2ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x2e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 2e <unknown>
+
+vsoxseg2ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg2ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x2c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 2c <unknown>
+
+vsoxseg2ei32.v v24, (a0), v4
+# CHECK-INST: vsoxseg2ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x2e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 2e <unknown>
+
+vsoxseg2ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg2ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x2c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 2c <unknown>
+
+vsoxseg2ei64.v v24, (a0), v4
+# CHECK-INST: vsoxseg2ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x2e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 2e <unknown>
+
+vsseg3e8.v v24, (a0), v0.t
+# CHECK-INST: vsseg3e8.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x40]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 40 <unknown>
+
+vsseg3e8.v v24, (a0)
+# CHECK-INST: vsseg3e8.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x42]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 42 <unknown>
+
+vsseg3e16.v v24, (a0), v0.t
+# CHECK-INST: vsseg3e16.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x40]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 40 <unknown>
+
+vsseg3e16.v v24, (a0)
+# CHECK-INST: vsseg3e16.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x42]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 42 <unknown>
+
+vsseg3e32.v v24, (a0), v0.t
+# CHECK-INST: vsseg3e32.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x40]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 40 <unknown>
+
+vsseg3e32.v v24, (a0)
+# CHECK-INST: vsseg3e32.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x42]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 42 <unknown>
+
+vsseg3e64.v v24, (a0), v0.t
+# CHECK-INST: vsseg3e64.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x40]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 40 <unknown>
+
+vsseg3e64.v v24, (a0)
+# CHECK-INST: vsseg3e64.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x42]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 42 <unknown>
+
+vsseg3e128.v v24, (a0), v0.t
+# CHECK-INST: vsseg3e128.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x50]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 50 <unknown>
+
+vsseg3e128.v v24, (a0)
+# CHECK-INST: vsseg3e128.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x52]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 52 <unknown>
+
+vsseg3e256.v v24, (a0), v0.t
+# CHECK-INST: vsseg3e256.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x50]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 50 <unknown>
+
+vsseg3e256.v v24, (a0)
+# CHECK-INST: vsseg3e256.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x52]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 52 <unknown>
+
+vsseg3e512.v v24, (a0), v0.t
+# CHECK-INST: vsseg3e512.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x50]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 50 <unknown>
+
+vsseg3e512.v v24, (a0)
+# CHECK-INST: vsseg3e512.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x52]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 52 <unknown>
+
+vsseg3e1024.v v24, (a0), v0.t
+# CHECK-INST: vsseg3e1024.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x50]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 50 <unknown>
+
+vsseg3e1024.v v24, (a0)
+# CHECK-INST: vsseg3e1024.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x52]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 52 <unknown>
+
+vssseg3e8.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg3e8.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0x48]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 48 <unknown>
+
vssseg3e8.v v24, (a0), a1
# CHECK-INST: vssseg3e8.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x0c,0xb5,0x4a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 4a <unknown>
+vssseg3e16.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg3e16.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0x48]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 48 <unknown>
+
vssseg3e16.v v24, (a0), a1
# CHECK-INST: vssseg3e16.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x5c,0xb5,0x4a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 4a <unknown>
+vssseg3e32.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg3e32.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0x48]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 48 <unknown>
+
vssseg3e32.v v24, (a0), a1
# CHECK-INST: vssseg3e32.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x6c,0xb5,0x4a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 4a <unknown>
+vssseg3e64.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg3e64.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0x48]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 48 <unknown>
+
vssseg3e64.v v24, (a0), a1
# CHECK-INST: vssseg3e64.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x7c,0xb5,0x4a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c b5 4a <unknown>
+vssseg3e128.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg3e128.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 58 <unknown>
+
vssseg3e128.v v24, (a0), a1
# CHECK-INST: vssseg3e128.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x0c,0xb5,0x5a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 5a <unknown>
+vssseg3e256.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg3e256.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 58 <unknown>
+
vssseg3e256.v v24, (a0), a1
# CHECK-INST: vssseg3e256.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x5c,0xb5,0x5a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 5a <unknown>
+vssseg3e512.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg3e512.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 58 <unknown>
+
vssseg3e512.v v24, (a0), a1
# CHECK-INST: vssseg3e512.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x6c,0xb5,0x5a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 5a <unknown>
-vssseg3e1024.v v24, (a0), a1
-# CHECK-INST: vssseg3e1024.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0x5a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 5a <unknown>
+vssseg3e1024.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg3e1024.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0x58]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 58 <unknown>
+
+vssseg3e1024.v v24, (a0), a1
+# CHECK-INST: vssseg3e1024.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0x5a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 5a <unknown>
+
+vsuxseg3ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg3ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 44 <unknown>
+
+vsuxseg3ei8.v v24, (a0), v4
+# CHECK-INST: vsuxseg3ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 46 <unknown>
+
+vsuxseg3ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg3ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 44 <unknown>
+
+vsuxseg3ei16.v v24, (a0), v4
+# CHECK-INST: vsuxseg3ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 46 <unknown>
+
+vsuxseg3ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg3ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 44 <unknown>
+
+vsuxseg3ei32.v v24, (a0), v4
+# CHECK-INST: vsuxseg3ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 46 <unknown>
+
+vsuxseg3ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg3ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x44]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 44 <unknown>
+
+vsuxseg3ei64.v v24, (a0), v4
+# CHECK-INST: vsuxseg3ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x46]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 46 <unknown>
+
+vsoxseg3ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg3ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 4c <unknown>
+
+vsoxseg3ei8.v v24, (a0), v4
+# CHECK-INST: vsoxseg3ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 4e <unknown>
+
+vsoxseg3ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg3ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 4c <unknown>
+
+vsoxseg3ei16.v v24, (a0), v4
+# CHECK-INST: vsoxseg3ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 4e <unknown>
+
+vsoxseg3ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg3ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 4c <unknown>
+
+vsoxseg3ei32.v v24, (a0), v4
+# CHECK-INST: vsoxseg3ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 4e <unknown>
+
+vsoxseg3ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg3ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x4c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 4c <unknown>
+
+vsoxseg3ei64.v v24, (a0), v4
+# CHECK-INST: vsoxseg3ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x4e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 4e <unknown>
+
+vsseg4e8.v v24, (a0), v0.t
+# CHECK-INST: vsseg4e8.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x60]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 60 <unknown>
+
+vsseg4e8.v v24, (a0)
+# CHECK-INST: vsseg4e8.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x62]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 62 <unknown>
+
+vsseg4e16.v v24, (a0), v0.t
+# CHECK-INST: vsseg4e16.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x60]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 60 <unknown>
+
+vsseg4e16.v v24, (a0)
+# CHECK-INST: vsseg4e16.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x62]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 62 <unknown>
+
+vsseg4e32.v v24, (a0), v0.t
+# CHECK-INST: vsseg4e32.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x60]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 60 <unknown>
+
+vsseg4e32.v v24, (a0)
+# CHECK-INST: vsseg4e32.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x62]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 62 <unknown>
+
+vsseg4e64.v v24, (a0), v0.t
+# CHECK-INST: vsseg4e64.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x60]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 60 <unknown>
+
+vsseg4e64.v v24, (a0)
+# CHECK-INST: vsseg4e64.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x62]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 62 <unknown>
+
+vsseg4e128.v v24, (a0), v0.t
+# CHECK-INST: vsseg4e128.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x70]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 70 <unknown>
-vssseg3e8.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg3e8.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0x48]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 48 <unknown>
+vsseg4e128.v v24, (a0)
+# CHECK-INST: vsseg4e128.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x72]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 72 <unknown>
-vssseg3e16.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg3e16.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0x48]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 48 <unknown>
+vsseg4e256.v v24, (a0), v0.t
+# CHECK-INST: vsseg4e256.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x70]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 70 <unknown>
-vssseg3e32.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg3e32.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0x48]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 48 <unknown>
+vsseg4e256.v v24, (a0)
+# CHECK-INST: vsseg4e256.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x72]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 72 <unknown>
-vssseg3e64.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg3e64.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0x48]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 48 <unknown>
+vsseg4e512.v v24, (a0), v0.t
+# CHECK-INST: vsseg4e512.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x70]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 70 <unknown>
-vssseg3e128.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg3e128.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0x58]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 58 <unknown>
+vsseg4e512.v v24, (a0)
+# CHECK-INST: vsseg4e512.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x72]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 72 <unknown>
-vssseg3e256.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg3e256.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0x58]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 58 <unknown>
+vsseg4e1024.v v24, (a0), v0.t
+# CHECK-INST: vsseg4e1024.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x70]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 70 <unknown>
-vssseg3e512.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg3e512.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0x58]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 58 <unknown>
+vsseg4e1024.v v24, (a0)
+# CHECK-INST: vsseg4e1024.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x72]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 72 <unknown>
-vssseg3e1024.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg3e1024.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0x58]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 58 <unknown>
+vssseg4e8.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg4e8.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0x68]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 68 <unknown>
vssseg4e8.v v24, (a0), a1
# CHECK-INST: vssseg4e8.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x0c,0xb5,0x6a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 6a <unknown>
+vssseg4e16.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg4e16.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0x68]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 68 <unknown>
+
vssseg4e16.v v24, (a0), a1
# CHECK-INST: vssseg4e16.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x5c,0xb5,0x6a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 6a <unknown>
+vssseg4e32.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg4e32.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0x68]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 68 <unknown>
+
vssseg4e32.v v24, (a0), a1
# CHECK-INST: vssseg4e32.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x6c,0xb5,0x6a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 6a <unknown>
+vssseg4e64.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg4e64.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0x68]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 68 <unknown>
+
vssseg4e64.v v24, (a0), a1
# CHECK-INST: vssseg4e64.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x7c,0xb5,0x6a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c b5 6a <unknown>
+vssseg4e128.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg4e128.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0x78]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 78 <unknown>
+
vssseg4e128.v v24, (a0), a1
# CHECK-INST: vssseg4e128.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x0c,0xb5,0x7a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 7a <unknown>
+vssseg4e256.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg4e256.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0x78]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 78 <unknown>
+
vssseg4e256.v v24, (a0), a1
# CHECK-INST: vssseg4e256.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x5c,0xb5,0x7a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 7a <unknown>
+vssseg4e512.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg4e512.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0x78]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 78 <unknown>
+
vssseg4e512.v v24, (a0), a1
# CHECK-INST: vssseg4e512.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x6c,0xb5,0x7a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 7a <unknown>
+vssseg4e1024.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg4e1024.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0x78]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 78 <unknown>
+
vssseg4e1024.v v24, (a0), a1
# CHECK-INST: vssseg4e1024.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x7c,0xb5,0x7a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c b5 7a <unknown>
-vssseg4e8.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg4e8.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0x68]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 68 <unknown>
+vsuxseg4ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg4ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x64]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 64 <unknown>
+
+vsuxseg4ei8.v v24, (a0), v4
+# CHECK-INST: vsuxseg4ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x66]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 66 <unknown>
+
+vsuxseg4ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg4ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x64]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 64 <unknown>
+
+vsuxseg4ei16.v v24, (a0), v4
+# CHECK-INST: vsuxseg4ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x66]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 66 <unknown>
+
+vsuxseg4ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg4ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x64]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 64 <unknown>
+
+vsuxseg4ei32.v v24, (a0), v4
+# CHECK-INST: vsuxseg4ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x66]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 66 <unknown>
+
+vsuxseg4ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg4ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x64]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 64 <unknown>
+
+vsuxseg4ei64.v v24, (a0), v4
+# CHECK-INST: vsuxseg4ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x66]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 66 <unknown>
+
+vsoxseg4ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg4ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x6c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 6c <unknown>
-vssseg4e16.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg4e16.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0x68]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 68 <unknown>
+vsoxseg4ei8.v v24, (a0), v4
+# CHECK-INST: vsoxseg4ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x6e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 6e <unknown>
-vssseg4e32.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg4e32.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0x68]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 68 <unknown>
+vsoxseg4ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg4ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x6c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 6c <unknown>
-vssseg4e64.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg4e64.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0x68]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 68 <unknown>
+vsoxseg4ei16.v v24, (a0), v4
+# CHECK-INST: vsoxseg4ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x6e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 6e <unknown>
-vssseg4e128.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg4e128.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0x78]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 78 <unknown>
+vsoxseg4ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg4ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x6c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 6c <unknown>
-vssseg4e256.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg4e256.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0x78]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 78 <unknown>
+vsoxseg4ei32.v v24, (a0), v4
+# CHECK-INST: vsoxseg4ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x6e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 6e <unknown>
-vssseg4e512.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg4e512.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0x78]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 78 <unknown>
+vsoxseg4ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg4ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x6c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 6c <unknown>
-vssseg4e1024.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg4e1024.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0x78]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 78 <unknown>
+vsoxseg4ei64.v v24, (a0), v4
+# CHECK-INST: vsoxseg4ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x6e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 6e <unknown>
-vssseg5e8.v v24, (a0), a1
-# CHECK-INST: vssseg5e8.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0x8a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 8a <unknown>
+vsseg5e8.v v24, (a0), v0.t
+# CHECK-INST: vsseg5e8.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x80]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 80 <unknown>
-vssseg5e16.v v24, (a0), a1
-# CHECK-INST: vssseg5e16.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0x8a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 8a <unknown>
+vsseg5e8.v v24, (a0)
+# CHECK-INST: vsseg5e8.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x82]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 82 <unknown>
-vssseg5e32.v v24, (a0), a1
-# CHECK-INST: vssseg5e32.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0x8a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 8a <unknown>
+vsseg5e16.v v24, (a0), v0.t
+# CHECK-INST: vsseg5e16.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x80]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 80 <unknown>
-vssseg5e64.v v24, (a0), a1
-# CHECK-INST: vssseg5e64.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0x8a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 8a <unknown>
+vsseg5e16.v v24, (a0)
+# CHECK-INST: vsseg5e16.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x82]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 82 <unknown>
-vssseg5e128.v v24, (a0), a1
-# CHECK-INST: vssseg5e128.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0x9a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 9a <unknown>
+vsseg5e32.v v24, (a0), v0.t
+# CHECK-INST: vsseg5e32.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x80]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 80 <unknown>
-vssseg5e256.v v24, (a0), a1
-# CHECK-INST: vssseg5e256.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0x9a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 9a <unknown>
+vsseg5e32.v v24, (a0)
+# CHECK-INST: vsseg5e32.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x82]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 82 <unknown>
-vssseg5e512.v v24, (a0), a1
-# CHECK-INST: vssseg5e512.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0x9a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 9a <unknown>
+vsseg5e64.v v24, (a0), v0.t
+# CHECK-INST: vsseg5e64.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x80]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 80 <unknown>
-vssseg5e1024.v v24, (a0), a1
-# CHECK-INST: vssseg5e1024.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0x9a]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 9a <unknown>
+vsseg5e64.v v24, (a0)
+# CHECK-INST: vsseg5e64.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x82]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 82 <unknown>
+
+vsseg5e128.v v24, (a0), v0.t
+# CHECK-INST: vsseg5e128.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x90]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 90 <unknown>
+
+vsseg5e128.v v24, (a0)
+# CHECK-INST: vsseg5e128.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0x92]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 92 <unknown>
+
+vsseg5e256.v v24, (a0), v0.t
+# CHECK-INST: vsseg5e256.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x90]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 90 <unknown>
+
+vsseg5e256.v v24, (a0)
+# CHECK-INST: vsseg5e256.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0x92]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 92 <unknown>
+
+vsseg5e512.v v24, (a0), v0.t
+# CHECK-INST: vsseg5e512.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x90]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 90 <unknown>
+
+vsseg5e512.v v24, (a0)
+# CHECK-INST: vsseg5e512.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0x92]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 92 <unknown>
+
+vsseg5e1024.v v24, (a0), v0.t
+# CHECK-INST: vsseg5e1024.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x90]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 90 <unknown>
+
+vsseg5e1024.v v24, (a0)
+# CHECK-INST: vsseg5e1024.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0x92]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 92 <unknown>
vssseg5e8.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg5e8.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x0c,0xb5,0x88]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 88 <unknown>
+vssseg5e8.v v24, (a0), a1
+# CHECK-INST: vssseg5e8.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0x8a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 8a <unknown>
+
vssseg5e16.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg5e16.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x5c,0xb5,0x88]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 88 <unknown>
+vssseg5e16.v v24, (a0), a1
+# CHECK-INST: vssseg5e16.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0x8a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 8a <unknown>
+
vssseg5e32.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg5e32.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x6c,0xb5,0x88]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 88 <unknown>
+vssseg5e32.v v24, (a0), a1
+# CHECK-INST: vssseg5e32.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0x8a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 8a <unknown>
+
vssseg5e64.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg5e64.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x7c,0xb5,0x88]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c b5 88 <unknown>
+vssseg5e64.v v24, (a0), a1
+# CHECK-INST: vssseg5e64.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0x8a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 8a <unknown>
+
vssseg5e128.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg5e128.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x0c,0xb5,0x98]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 98 <unknown>
+vssseg5e128.v v24, (a0), a1
+# CHECK-INST: vssseg5e128.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0x9a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 9a <unknown>
+
vssseg5e256.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg5e256.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x5c,0xb5,0x98]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 98 <unknown>
+vssseg5e256.v v24, (a0), a1
+# CHECK-INST: vssseg5e256.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0x9a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 9a <unknown>
+
vssseg5e512.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg5e512.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x6c,0xb5,0x98]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 98 <unknown>
+vssseg5e512.v v24, (a0), a1
+# CHECK-INST: vssseg5e512.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0x9a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 9a <unknown>
+
vssseg5e1024.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg5e1024.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x7c,0xb5,0x98]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c b5 98 <unknown>
+vssseg5e1024.v v24, (a0), a1
+# CHECK-INST: vssseg5e1024.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0x9a]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 9a <unknown>
+
+vsuxseg5ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg5ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x84]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 84 <unknown>
+
+vsuxseg5ei8.v v24, (a0), v4
+# CHECK-INST: vsuxseg5ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x86]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 86 <unknown>
+
+vsuxseg5ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg5ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x84]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 84 <unknown>
+
+vsuxseg5ei16.v v24, (a0), v4
+# CHECK-INST: vsuxseg5ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x86]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 86 <unknown>
+
+vsuxseg5ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg5ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x84]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 84 <unknown>
+
+vsuxseg5ei32.v v24, (a0), v4
+# CHECK-INST: vsuxseg5ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x86]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 86 <unknown>
+
+vsuxseg5ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg5ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x84]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 84 <unknown>
+
+vsuxseg5ei64.v v24, (a0), v4
+# CHECK-INST: vsuxseg5ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x86]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 86 <unknown>
+
+vsoxseg5ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg5ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x8c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 8c <unknown>
+
+vsoxseg5ei8.v v24, (a0), v4
+# CHECK-INST: vsoxseg5ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0x8e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 8e <unknown>
+
+vsoxseg5ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg5ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x8c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 8c <unknown>
+
+vsoxseg5ei16.v v24, (a0), v4
+# CHECK-INST: vsoxseg5ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0x8e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 8e <unknown>
+
+vsoxseg5ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg5ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x8c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 8c <unknown>
+
+vsoxseg5ei32.v v24, (a0), v4
+# CHECK-INST: vsoxseg5ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0x8e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 8e <unknown>
+
+vsoxseg5ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg5ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x8c]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 8c <unknown>
+
+vsoxseg5ei64.v v24, (a0), v4
+# CHECK-INST: vsoxseg5ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0x8e]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 8e <unknown>
+
+vsseg6e8.v v24, (a0), v0.t
+# CHECK-INST: vsseg6e8.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xa0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 a0 <unknown>
+
+vsseg6e8.v v24, (a0)
+# CHECK-INST: vsseg6e8.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xa2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 a2 <unknown>
+
+vsseg6e16.v v24, (a0), v0.t
+# CHECK-INST: vsseg6e16.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xa0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 a0 <unknown>
+
+vsseg6e16.v v24, (a0)
+# CHECK-INST: vsseg6e16.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xa2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 a2 <unknown>
+
+vsseg6e32.v v24, (a0), v0.t
+# CHECK-INST: vsseg6e32.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xa0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 a0 <unknown>
+
+vsseg6e32.v v24, (a0)
+# CHECK-INST: vsseg6e32.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xa2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 a2 <unknown>
+
+vsseg6e64.v v24, (a0), v0.t
+# CHECK-INST: vsseg6e64.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xa0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 a0 <unknown>
+
+vsseg6e64.v v24, (a0)
+# CHECK-INST: vsseg6e64.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xa2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 a2 <unknown>
+
+vsseg6e128.v v24, (a0), v0.t
+# CHECK-INST: vsseg6e128.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xb0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 b0 <unknown>
+
+vsseg6e128.v v24, (a0)
+# CHECK-INST: vsseg6e128.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xb2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 b2 <unknown>
+
+vsseg6e256.v v24, (a0), v0.t
+# CHECK-INST: vsseg6e256.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xb0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 b0 <unknown>
+
+vsseg6e256.v v24, (a0)
+# CHECK-INST: vsseg6e256.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xb2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 b2 <unknown>
+
+vsseg6e512.v v24, (a0), v0.t
+# CHECK-INST: vsseg6e512.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xb0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 b0 <unknown>
+
+vsseg6e512.v v24, (a0)
+# CHECK-INST: vsseg6e512.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xb2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 b2 <unknown>
+
+vsseg6e1024.v v24, (a0), v0.t
+# CHECK-INST: vsseg6e1024.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xb0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 b0 <unknown>
+
+vsseg6e1024.v v24, (a0)
+# CHECK-INST: vsseg6e1024.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xb2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 b2 <unknown>
+
+vssseg6e8.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg6e8.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0xa8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 a8 <unknown>
+
vssseg6e8.v v24, (a0), a1
# CHECK-INST: vssseg6e8.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x0c,0xb5,0xaa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 aa <unknown>
-vssseg6e16.v v24, (a0), a1
-# CHECK-INST: vssseg6e16.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0xaa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 aa <unknown>
+vssseg6e16.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg6e16.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0xa8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 a8 <unknown>
+
+vssseg6e16.v v24, (a0), a1
+# CHECK-INST: vssseg6e16.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0xaa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 aa <unknown>
+
+vssseg6e32.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg6e32.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0xa8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 a8 <unknown>
+
+vssseg6e32.v v24, (a0), a1
+# CHECK-INST: vssseg6e32.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0xaa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 aa <unknown>
+
+vssseg6e64.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg6e64.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0xa8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 a8 <unknown>
+
+vssseg6e64.v v24, (a0), a1
+# CHECK-INST: vssseg6e64.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0xaa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 aa <unknown>
+
+vssseg6e128.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg6e128.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0xb8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 b8 <unknown>
+
+vssseg6e128.v v24, (a0), a1
+# CHECK-INST: vssseg6e128.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0xba]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 ba <unknown>
+
+vssseg6e256.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg6e256.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0xb8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 b8 <unknown>
+
+vssseg6e256.v v24, (a0), a1
+# CHECK-INST: vssseg6e256.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0xba]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 ba <unknown>
+
+vssseg6e512.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg6e512.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0xb8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 b8 <unknown>
+
+vssseg6e512.v v24, (a0), a1
+# CHECK-INST: vssseg6e512.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0xba]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 ba <unknown>
+
+vssseg6e1024.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg6e1024.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0xb8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 b8 <unknown>
+
+vssseg6e1024.v v24, (a0), a1
+# CHECK-INST: vssseg6e1024.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0xba]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 ba <unknown>
+
+vsuxseg6ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg6ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0xa4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 a4 <unknown>
+
+vsuxseg6ei8.v v24, (a0), v4
+# CHECK-INST: vsuxseg6ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0xa6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 a6 <unknown>
+
+vsuxseg6ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg6ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0xa4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 a4 <unknown>
+
+vsuxseg6ei16.v v24, (a0), v4
+# CHECK-INST: vsuxseg6ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0xa6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 a6 <unknown>
+
+vsuxseg6ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg6ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0xa4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 a4 <unknown>
+
+vsuxseg6ei32.v v24, (a0), v4
+# CHECK-INST: vsuxseg6ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0xa6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 a6 <unknown>
+
+vsuxseg6ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg6ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0xa4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 a4 <unknown>
+
+vsuxseg6ei64.v v24, (a0), v4
+# CHECK-INST: vsuxseg6ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0xa6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 a6 <unknown>
+
+vsoxseg6ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg6ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0xac]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 ac <unknown>
+
+vsoxseg6ei8.v v24, (a0), v4
+# CHECK-INST: vsoxseg6ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0xae]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 ae <unknown>
+
+vsoxseg6ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg6ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0xac]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 ac <unknown>
+
+vsoxseg6ei16.v v24, (a0), v4
+# CHECK-INST: vsoxseg6ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0xae]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 ae <unknown>
+
+vsoxseg6ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg6ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0xac]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 ac <unknown>
+
+vsoxseg6ei32.v v24, (a0), v4
+# CHECK-INST: vsoxseg6ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0xae]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 ae <unknown>
+
+vsoxseg6ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg6ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0xac]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 ac <unknown>
+
+vsoxseg6ei64.v v24, (a0), v4
+# CHECK-INST: vsoxseg6ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0xae]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 ae <unknown>
+
+vsseg7e8.v v24, (a0), v0.t
+# CHECK-INST: vsseg7e8.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xc0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 c0 <unknown>
+
+vsseg7e8.v v24, (a0)
+# CHECK-INST: vsseg7e8.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xc2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 c2 <unknown>
+
+vsseg7e16.v v24, (a0), v0.t
+# CHECK-INST: vsseg7e16.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xc0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 c0 <unknown>
-vssseg6e32.v v24, (a0), a1
-# CHECK-INST: vssseg6e32.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0xaa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 aa <unknown>
+vsseg7e16.v v24, (a0)
+# CHECK-INST: vsseg7e16.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xc2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 c2 <unknown>
-vssseg6e64.v v24, (a0), a1
-# CHECK-INST: vssseg6e64.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0xaa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 aa <unknown>
+vsseg7e32.v v24, (a0), v0.t
+# CHECK-INST: vsseg7e32.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xc0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 c0 <unknown>
-vssseg6e128.v v24, (a0), a1
-# CHECK-INST: vssseg6e128.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0xba]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 ba <unknown>
+vsseg7e32.v v24, (a0)
+# CHECK-INST: vsseg7e32.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xc2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 c2 <unknown>
-vssseg6e256.v v24, (a0), a1
-# CHECK-INST: vssseg6e256.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0xba]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 ba <unknown>
+vsseg7e64.v v24, (a0), v0.t
+# CHECK-INST: vsseg7e64.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xc0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 c0 <unknown>
-vssseg6e512.v v24, (a0), a1
-# CHECK-INST: vssseg6e512.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0xba]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 ba <unknown>
+vsseg7e64.v v24, (a0)
+# CHECK-INST: vsseg7e64.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xc2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 c2 <unknown>
-vssseg6e1024.v v24, (a0), a1
-# CHECK-INST: vssseg6e1024.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0xba]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 ba <unknown>
+vsseg7e128.v v24, (a0), v0.t
+# CHECK-INST: vsseg7e128.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xd0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 d0 <unknown>
-vssseg6e8.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg6e8.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0xa8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 a8 <unknown>
+vsseg7e128.v v24, (a0)
+# CHECK-INST: vsseg7e128.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xd2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 d2 <unknown>
-vssseg6e16.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg6e16.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0xa8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 a8 <unknown>
+vsseg7e256.v v24, (a0), v0.t
+# CHECK-INST: vsseg7e256.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xd0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 d0 <unknown>
-vssseg6e32.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg6e32.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0xa8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 a8 <unknown>
+vsseg7e256.v v24, (a0)
+# CHECK-INST: vsseg7e256.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xd2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 d2 <unknown>
-vssseg6e64.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg6e64.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0xa8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 a8 <unknown>
+vsseg7e512.v v24, (a0), v0.t
+# CHECK-INST: vsseg7e512.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xd0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 d0 <unknown>
-vssseg6e128.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg6e128.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0xb8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 b8 <unknown>
+vsseg7e512.v v24, (a0)
+# CHECK-INST: vsseg7e512.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xd2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 d2 <unknown>
-vssseg6e256.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg6e256.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0xb8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 b8 <unknown>
+vsseg7e1024.v v24, (a0), v0.t
+# CHECK-INST: vsseg7e1024.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xd0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 d0 <unknown>
-vssseg6e512.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg6e512.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0xb8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 b8 <unknown>
+vsseg7e1024.v v24, (a0)
+# CHECK-INST: vsseg7e1024.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xd2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 d2 <unknown>
-vssseg6e1024.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg6e1024.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0xb8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 b8 <unknown>
+vssseg7e8.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg7e8.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0xc8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 c8 <unknown>
vssseg7e8.v v24, (a0), a1
# CHECK-INST: vssseg7e8.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x0c,0xb5,0xca]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 ca <unknown>
+vssseg7e16.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg7e16.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0xc8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 c8 <unknown>
+
vssseg7e16.v v24, (a0), a1
# CHECK-INST: vssseg7e16.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x5c,0xb5,0xca]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 ca <unknown>
+vssseg7e32.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg7e32.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0xc8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 c8 <unknown>
+
vssseg7e32.v v24, (a0), a1
# CHECK-INST: vssseg7e32.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x6c,0xb5,0xca]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 ca <unknown>
+vssseg7e64.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg7e64.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0xc8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 c8 <unknown>
+
vssseg7e64.v v24, (a0), a1
# CHECK-INST: vssseg7e64.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x7c,0xb5,0xca]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c b5 ca <unknown>
+vssseg7e128.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg7e128.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0xd8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 d8 <unknown>
+
vssseg7e128.v v24, (a0), a1
# CHECK-INST: vssseg7e128.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x0c,0xb5,0xda]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 da <unknown>
+vssseg7e256.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg7e256.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0xd8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 d8 <unknown>
+
vssseg7e256.v v24, (a0), a1
# CHECK-INST: vssseg7e256.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x5c,0xb5,0xda]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 da <unknown>
+vssseg7e512.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg7e512.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0xd8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 d8 <unknown>
+
vssseg7e512.v v24, (a0), a1
# CHECK-INST: vssseg7e512.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x6c,0xb5,0xda]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 da <unknown>
+vssseg7e1024.v v24, (a0), a1, v0.t
+# CHECK-INST: vssseg7e1024.v v24, (a0), a1, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0xd8]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 d8 <unknown>
+
vssseg7e1024.v v24, (a0), a1
# CHECK-INST: vssseg7e1024.v v24, (a0), a1
# CHECK-ENCODING: [0x27,0x7c,0xb5,0xda]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c b5 da <unknown>
-vssseg7e8.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg7e8.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0xc8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 c8 <unknown>
+vsuxseg7ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg7ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0xc4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 c4 <unknown>
+
+vsuxseg7ei8.v v24, (a0), v4
+# CHECK-INST: vsuxseg7ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0xc6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 c6 <unknown>
+
+vsuxseg7ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg7ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0xc4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 c4 <unknown>
+
+vsuxseg7ei16.v v24, (a0), v4
+# CHECK-INST: vsuxseg7ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0xc6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 c6 <unknown>
+
+vsuxseg7ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg7ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0xc4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 c4 <unknown>
+
+vsuxseg7ei32.v v24, (a0), v4
+# CHECK-INST: vsuxseg7ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0xc6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 c6 <unknown>
+
+vsuxseg7ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg7ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0xc4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 c4 <unknown>
+
+vsuxseg7ei64.v v24, (a0), v4
+# CHECK-INST: vsuxseg7ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0xc6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 c6 <unknown>
+
+vsoxseg7ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg7ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0xcc]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 cc <unknown>
-vssseg7e16.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg7e16.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0xc8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 c8 <unknown>
+vsoxseg7ei8.v v24, (a0), v4
+# CHECK-INST: vsoxseg7ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0xce]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 ce <unknown>
-vssseg7e32.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg7e32.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0xc8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 c8 <unknown>
+vsoxseg7ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg7ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0xcc]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 cc <unknown>
-vssseg7e64.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg7e64.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0xc8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 c8 <unknown>
+vsoxseg7ei16.v v24, (a0), v4
+# CHECK-INST: vsoxseg7ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0xce]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 ce <unknown>
-vssseg7e128.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg7e128.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0xd8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 d8 <unknown>
+vsoxseg7ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg7ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0xcc]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 cc <unknown>
-vssseg7e256.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg7e256.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0xd8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 d8 <unknown>
+vsoxseg7ei32.v v24, (a0), v4
+# CHECK-INST: vsoxseg7ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0xce]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 ce <unknown>
-vssseg7e512.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg7e512.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0xd8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 d8 <unknown>
+vsoxseg7ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg7ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0xcc]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 cc <unknown>
-vssseg7e1024.v v24, (a0), a1, v0.t
-# CHECK-INST: vssseg7e1024.v v24, (a0), a1, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0xd8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 d8 <unknown>
+vsoxseg7ei64.v v24, (a0), v4
+# CHECK-INST: vsoxseg7ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0xce]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 ce <unknown>
-vssseg8e8.v v24, (a0), a1
-# CHECK-INST: vssseg8e8.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0xea]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 ea <unknown>
+vsseg8e8.v v24, (a0), v0.t
+# CHECK-INST: vsseg8e8.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xe0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 e0 <unknown>
-vssseg8e16.v v24, (a0), a1
-# CHECK-INST: vssseg8e16.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0xea]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 ea <unknown>
+vsseg8e8.v v24, (a0)
+# CHECK-INST: vsseg8e8.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xe2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 e2 <unknown>
+
+vsseg8e16.v v24, (a0), v0.t
+# CHECK-INST: vsseg8e16.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xe0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 e0 <unknown>
+
+vsseg8e16.v v24, (a0)
+# CHECK-INST: vsseg8e16.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xe2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 e2 <unknown>
+
+vsseg8e32.v v24, (a0), v0.t
+# CHECK-INST: vsseg8e32.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xe0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 e0 <unknown>
+
+vsseg8e32.v v24, (a0)
+# CHECK-INST: vsseg8e32.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xe2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 e2 <unknown>
+
+vsseg8e64.v v24, (a0), v0.t
+# CHECK-INST: vsseg8e64.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xe0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 e0 <unknown>
+
+vsseg8e64.v v24, (a0)
+# CHECK-INST: vsseg8e64.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xe2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 e2 <unknown>
+
+vsseg8e128.v v24, (a0), v0.t
+# CHECK-INST: vsseg8e128.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xf0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 f0 <unknown>
-vssseg8e32.v v24, (a0), a1
-# CHECK-INST: vssseg8e32.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0xea]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 ea <unknown>
+vsseg8e128.v v24, (a0)
+# CHECK-INST: vsseg8e128.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x0c,0x05,0xf2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 05 f2 <unknown>
-vssseg8e64.v v24, (a0), a1
-# CHECK-INST: vssseg8e64.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0xea]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 ea <unknown>
+vsseg8e256.v v24, (a0), v0.t
+# CHECK-INST: vsseg8e256.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xf0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 f0 <unknown>
-vssseg8e128.v v24, (a0), a1
-# CHECK-INST: vssseg8e128.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x0c,0xb5,0xfa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c b5 fa <unknown>
+vsseg8e256.v v24, (a0)
+# CHECK-INST: vsseg8e256.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x5c,0x05,0xf2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 05 f2 <unknown>
-vssseg8e256.v v24, (a0), a1
-# CHECK-INST: vssseg8e256.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x5c,0xb5,0xfa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c b5 fa <unknown>
+vsseg8e512.v v24, (a0), v0.t
+# CHECK-INST: vsseg8e512.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xf0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 f0 <unknown>
-vssseg8e512.v v24, (a0), a1
-# CHECK-INST: vssseg8e512.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x6c,0xb5,0xfa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c b5 fa <unknown>
+vsseg8e512.v v24, (a0)
+# CHECK-INST: vsseg8e512.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x6c,0x05,0xf2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 05 f2 <unknown>
-vssseg8e1024.v v24, (a0), a1
-# CHECK-INST: vssseg8e1024.v v24, (a0), a1
-# CHECK-ENCODING: [0x27,0x7c,0xb5,0xfa]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c b5 fa <unknown>
+vsseg8e1024.v v24, (a0), v0.t
+# CHECK-INST: vsseg8e1024.v v24, (a0), v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xf0]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 f0 <unknown>
+
+vsseg8e1024.v v24, (a0)
+# CHECK-INST: vsseg8e1024.v v24, (a0)
+# CHECK-ENCODING: [0x27,0x7c,0x05,0xf2]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 05 f2 <unknown>
vssseg8e8.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg8e8.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x0c,0xb5,0xe8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 e8 <unknown>
+vssseg8e8.v v24, (a0), a1
+# CHECK-INST: vssseg8e8.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0xea]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 ea <unknown>
+
vssseg8e16.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg8e16.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x5c,0xb5,0xe8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 e8 <unknown>
+vssseg8e16.v v24, (a0), a1
+# CHECK-INST: vssseg8e16.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0xea]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 ea <unknown>
+
vssseg8e32.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg8e32.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x6c,0xb5,0xe8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 e8 <unknown>
+vssseg8e32.v v24, (a0), a1
+# CHECK-INST: vssseg8e32.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0xea]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 ea <unknown>
+
vssseg8e64.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg8e64.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x7c,0xb5,0xe8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c b5 e8 <unknown>
+vssseg8e64.v v24, (a0), a1
+# CHECK-INST: vssseg8e64.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0xea]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 ea <unknown>
+
vssseg8e128.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg8e128.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x0c,0xb5,0xf8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c b5 f8 <unknown>
+vssseg8e128.v v24, (a0), a1
+# CHECK-INST: vssseg8e128.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x0c,0xb5,0xfa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c b5 fa <unknown>
+
vssseg8e256.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg8e256.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x5c,0xb5,0xf8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c b5 f8 <unknown>
+vssseg8e256.v v24, (a0), a1
+# CHECK-INST: vssseg8e256.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x5c,0xb5,0xfa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c b5 fa <unknown>
+
vssseg8e512.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg8e512.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x6c,0xb5,0xf8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c b5 f8 <unknown>
+vssseg8e512.v v24, (a0), a1
+# CHECK-INST: vssseg8e512.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x6c,0xb5,0xfa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c b5 fa <unknown>
+
vssseg8e1024.v v24, (a0), a1, v0.t
# CHECK-INST: vssseg8e1024.v v24, (a0), a1, v0.t
# CHECK-ENCODING: [0x27,0x7c,0xb5,0xf8]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c b5 f8 <unknown>
-vsxseg2ei8.v v24, (a0), v4
-# CHECK-INST: vsxseg2ei8.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x2e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 2e <unknown>
-
-vsxseg2ei16.v v24, (a0), v4
-# CHECK-INST: vsxseg2ei16.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x2e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 2e <unknown>
-
-vsxseg2ei32.v v24, (a0), v4
-# CHECK-INST: vsxseg2ei32.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x2e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 2e <unknown>
-
-vsxseg2ei64.v v24, (a0), v4
-# CHECK-INST: vsxseg2ei64.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x2e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 2e <unknown>
-
-vsxseg2ei128.v v24, (a0), v4
-# CHECK-INST: vsxseg2ei128.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x3e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 3e <unknown>
-
-vsxseg2ei256.v v24, (a0), v4
-# CHECK-INST: vsxseg2ei256.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x3e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 3e <unknown>
-
-vsxseg2ei512.v v24, (a0), v4
-# CHECK-INST: vsxseg2ei512.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x3e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 3e <unknown>
-
-vsxseg2ei1024.v v24, (a0), v4
-# CHECK-INST: vsxseg2ei1024.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x3e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 3e <unknown>
-
-vsxseg2ei8.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg2ei8.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x2c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 2c <unknown>
-
-vsxseg2ei16.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg2ei16.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x2c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 2c <unknown>
-
-vsxseg2ei32.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg2ei32.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x2c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 2c <unknown>
-
-vsxseg2ei64.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg2ei64.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x2c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 2c <unknown>
-
-vsxseg2ei128.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg2ei128.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x3c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 3c <unknown>
-
-vsxseg2ei256.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg2ei256.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x3c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 3c <unknown>
-
-vsxseg2ei512.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg2ei512.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x3c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 3c <unknown>
-
-vsxseg2ei1024.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg2ei1024.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x3c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 3c <unknown>
-
-vsxseg3ei8.v v24, (a0), v4
-# CHECK-INST: vsxseg3ei8.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x4e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 4e <unknown>
-
-vsxseg3ei16.v v24, (a0), v4
-# CHECK-INST: vsxseg3ei16.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x4e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 4e <unknown>
-
-vsxseg3ei32.v v24, (a0), v4
-# CHECK-INST: vsxseg3ei32.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x4e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 4e <unknown>
-
-vsxseg3ei64.v v24, (a0), v4
-# CHECK-INST: vsxseg3ei64.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x4e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 4e <unknown>
-
-vsxseg3ei128.v v24, (a0), v4
-# CHECK-INST: vsxseg3ei128.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x5e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 5e <unknown>
-
-vsxseg3ei256.v v24, (a0), v4
-# CHECK-INST: vsxseg3ei256.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x5e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 5e <unknown>
-
-vsxseg3ei512.v v24, (a0), v4
-# CHECK-INST: vsxseg3ei512.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x5e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 5e <unknown>
-
-vsxseg3ei1024.v v24, (a0), v4
-# CHECK-INST: vsxseg3ei1024.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x5e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 5e <unknown>
-
-vsxseg3ei8.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg3ei8.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x4c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 4c <unknown>
-
-vsxseg3ei16.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg3ei16.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x4c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 4c <unknown>
-
-vsxseg3ei32.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg3ei32.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x4c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 4c <unknown>
-
-vsxseg3ei64.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg3ei64.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x4c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 4c <unknown>
-
-vsxseg3ei128.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg3ei128.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x5c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 5c <unknown>
-
-vsxseg3ei256.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg3ei256.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x5c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 5c <unknown>
-
-vsxseg3ei512.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg3ei512.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x5c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 5c <unknown>
-
-vsxseg3ei1024.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg3ei1024.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x5c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 5c <unknown>
-
-vsxseg4ei8.v v24, (a0), v4
-# CHECK-INST: vsxseg4ei8.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x6e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 6e <unknown>
-
-vsxseg4ei16.v v24, (a0), v4
-# CHECK-INST: vsxseg4ei16.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x6e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 6e <unknown>
-
-vsxseg4ei32.v v24, (a0), v4
-# CHECK-INST: vsxseg4ei32.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x6e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 6e <unknown>
-
-vsxseg4ei64.v v24, (a0), v4
-# CHECK-INST: vsxseg4ei64.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x6e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 6e <unknown>
-
-vsxseg4ei128.v v24, (a0), v4
-# CHECK-INST: vsxseg4ei128.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x7e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 7e <unknown>
-
-vsxseg4ei256.v v24, (a0), v4
-# CHECK-INST: vsxseg4ei256.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x7e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 7e <unknown>
-
-vsxseg4ei512.v v24, (a0), v4
-# CHECK-INST: vsxseg4ei512.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x7e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 7e <unknown>
-
-vsxseg4ei1024.v v24, (a0), v4
-# CHECK-INST: vsxseg4ei1024.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x7e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 7e <unknown>
-
-vsxseg4ei8.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg4ei8.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x6c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 6c <unknown>
-
-vsxseg4ei16.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg4ei16.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x6c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 6c <unknown>
-
-vsxseg4ei32.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg4ei32.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x6c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 6c <unknown>
-
-vsxseg4ei64.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg4ei64.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x6c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 6c <unknown>
-
-vsxseg4ei128.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg4ei128.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x7c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 7c <unknown>
-
-vsxseg4ei256.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg4ei256.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x7c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 7c <unknown>
-
-vsxseg4ei512.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg4ei512.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x7c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 7c <unknown>
-
-vsxseg4ei1024.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg4ei1024.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x7c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 7c <unknown>
-
-vsxseg5ei8.v v24, (a0), v4
-# CHECK-INST: vsxseg5ei8.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x8e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 8e <unknown>
-
-vsxseg5ei16.v v24, (a0), v4
-# CHECK-INST: vsxseg5ei16.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x8e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 8e <unknown>
-
-vsxseg5ei32.v v24, (a0), v4
-# CHECK-INST: vsxseg5ei32.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x8e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 8e <unknown>
-
-vsxseg5ei64.v v24, (a0), v4
-# CHECK-INST: vsxseg5ei64.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x8e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 8e <unknown>
-
-vsxseg5ei128.v v24, (a0), v4
-# CHECK-INST: vsxseg5ei128.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x9e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 9e <unknown>
-
-vsxseg5ei256.v v24, (a0), v4
-# CHECK-INST: vsxseg5ei256.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x9e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 9e <unknown>
-
-vsxseg5ei512.v v24, (a0), v4
-# CHECK-INST: vsxseg5ei512.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x9e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 9e <unknown>
-
-vsxseg5ei1024.v v24, (a0), v4
-# CHECK-INST: vsxseg5ei1024.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x9e]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 9e <unknown>
-
-vsxseg5ei8.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg5ei8.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x8c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 8c <unknown>
-
-vsxseg5ei16.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg5ei16.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x8c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 8c <unknown>
-
-vsxseg5ei32.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg5ei32.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x8c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 8c <unknown>
-
-vsxseg5ei64.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg5ei64.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x8c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 8c <unknown>
-
-vsxseg5ei128.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg5ei128.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0x9c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 9c <unknown>
-
-vsxseg5ei256.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg5ei256.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0x9c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 9c <unknown>
-
-vsxseg5ei512.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg5ei512.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0x9c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 9c <unknown>
-
-vsxseg5ei1024.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg5ei1024.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0x9c]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 9c <unknown>
-
-vsxseg6ei8.v v24, (a0), v4
-# CHECK-INST: vsxseg6ei8.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0xae]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 ae <unknown>
-
-vsxseg6ei16.v v24, (a0), v4
-# CHECK-INST: vsxseg6ei16.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0xae]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 ae <unknown>
-
-vsxseg6ei32.v v24, (a0), v4
-# CHECK-INST: vsxseg6ei32.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0xae]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 ae <unknown>
-
-vsxseg6ei64.v v24, (a0), v4
-# CHECK-INST: vsxseg6ei64.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0xae]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 ae <unknown>
-
-vsxseg6ei128.v v24, (a0), v4
-# CHECK-INST: vsxseg6ei128.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0xbe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 be <unknown>
-
-vsxseg6ei256.v v24, (a0), v4
-# CHECK-INST: vsxseg6ei256.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0xbe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 be <unknown>
-
-vsxseg6ei512.v v24, (a0), v4
-# CHECK-INST: vsxseg6ei512.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0xbe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 be <unknown>
-
-vsxseg6ei1024.v v24, (a0), v4
-# CHECK-INST: vsxseg6ei1024.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0xbe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 be <unknown>
-
-vsxseg6ei8.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg6ei8.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0xac]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 ac <unknown>
-
-vsxseg6ei16.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg6ei16.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0xac]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 ac <unknown>
-
-vsxseg6ei32.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg6ei32.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0xac]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 ac <unknown>
-
-vsxseg6ei64.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg6ei64.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0xac]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 ac <unknown>
-
-vsxseg6ei128.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg6ei128.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0xbc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 bc <unknown>
-
-vsxseg6ei256.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg6ei256.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0xbc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 bc <unknown>
-
-vsxseg6ei512.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg6ei512.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0xbc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 bc <unknown>
-
-vsxseg6ei1024.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg6ei1024.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0xbc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 bc <unknown>
-
-vsxseg7ei8.v v24, (a0), v4
-# CHECK-INST: vsxseg7ei8.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0xce]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 ce <unknown>
-
-vsxseg7ei16.v v24, (a0), v4
-# CHECK-INST: vsxseg7ei16.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0xce]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 ce <unknown>
-
-vsxseg7ei32.v v24, (a0), v4
-# CHECK-INST: vsxseg7ei32.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0xce]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 ce <unknown>
-
-vsxseg7ei64.v v24, (a0), v4
-# CHECK-INST: vsxseg7ei64.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0xce]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 ce <unknown>
-
-vsxseg7ei128.v v24, (a0), v4
-# CHECK-INST: vsxseg7ei128.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0xde]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 de <unknown>
-
-vsxseg7ei256.v v24, (a0), v4
-# CHECK-INST: vsxseg7ei256.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0xde]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 de <unknown>
-
-vsxseg7ei512.v v24, (a0), v4
-# CHECK-INST: vsxseg7ei512.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0xde]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 de <unknown>
-
-vsxseg7ei1024.v v24, (a0), v4
-# CHECK-INST: vsxseg7ei1024.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0xde]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 de <unknown>
-
-vsxseg7ei8.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg7ei8.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0xcc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 cc <unknown>
-
-vsxseg7ei16.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg7ei16.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0xcc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 cc <unknown>
-
-vsxseg7ei32.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg7ei32.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0xcc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 cc <unknown>
+vssseg8e1024.v v24, (a0), a1
+# CHECK-INST: vssseg8e1024.v v24, (a0), a1
+# CHECK-ENCODING: [0x27,0x7c,0xb5,0xfa]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c b5 fa <unknown>
-vsxseg7ei64.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg7ei64.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0xcc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 cc <unknown>
+vsuxseg8ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg8ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0xe4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 e4 <unknown>
+
+vsuxseg8ei8.v v24, (a0), v4
+# CHECK-INST: vsuxseg8ei8.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x0c,0x45,0xe6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 e6 <unknown>
+
+vsuxseg8ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg8ei16.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x5c,0x45,0xe4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 e4 <unknown>
+
+vsuxseg8ei16.v v24, (a0), v4
+# CHECK-INST: vsuxseg8ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0xe6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 e6 <unknown>
+
+vsuxseg8ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg8ei32.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x6c,0x45,0xe4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 e4 <unknown>
+
+vsuxseg8ei32.v v24, (a0), v4
+# CHECK-INST: vsuxseg8ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0xe6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 e6 <unknown>
+
+vsuxseg8ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsuxseg8ei64.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x7c,0x45,0xe4]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 e4 <unknown>
+
+vsuxseg8ei64.v v24, (a0), v4
+# CHECK-INST: vsuxseg8ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0xe6]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 e6 <unknown>
+
+vsoxseg8ei8.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg8ei8.v v24, (a0), v4, v0.t
+# CHECK-ENCODING: [0x27,0x0c,0x45,0xec]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 0c 45 ec <unknown>
-vsxseg7ei128.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg7ei128.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0xdc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 dc <unknown>
-
-vsxseg7ei256.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg7ei256.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0xdc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 dc <unknown>
-
-vsxseg7ei512.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg7ei512.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0xdc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 dc <unknown>
-
-vsxseg7ei1024.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg7ei1024.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0xdc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 dc <unknown>
-
-vsxseg8ei8.v v24, (a0), v4
-# CHECK-INST: vsxseg8ei8.v v24, (a0), v4
+vsoxseg8ei8.v v24, (a0), v4
+# CHECK-INST: vsoxseg8ei8.v v24, (a0), v4
# CHECK-ENCODING: [0x27,0x0c,0x45,0xee]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 0c 45 ee <unknown>
-vsxseg8ei16.v v24, (a0), v4
-# CHECK-INST: vsxseg8ei16.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0xee]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 ee <unknown>
-
-vsxseg8ei32.v v24, (a0), v4
-# CHECK-INST: vsxseg8ei32.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0xee]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 ee <unknown>
-
-vsxseg8ei64.v v24, (a0), v4
-# CHECK-INST: vsxseg8ei64.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0xee]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 ee <unknown>
-
-vsxseg8ei128.v v24, (a0), v4
-# CHECK-INST: vsxseg8ei128.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x0c,0x45,0xfe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 fe <unknown>
-
-vsxseg8ei256.v v24, (a0), v4
-# CHECK-INST: vsxseg8ei256.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x5c,0x45,0xfe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 fe <unknown>
-
-vsxseg8ei512.v v24, (a0), v4
-# CHECK-INST: vsxseg8ei512.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x6c,0x45,0xfe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 fe <unknown>
-
-vsxseg8ei1024.v v24, (a0), v4
-# CHECK-INST: vsxseg8ei1024.v v24, (a0), v4
-# CHECK-ENCODING: [0x27,0x7c,0x45,0xfe]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 fe <unknown>
-
-vsxseg8ei8.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg8ei8.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0xec]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 ec <unknown>
-
-vsxseg8ei16.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg8ei16.v v24, (a0), v4, v0.t
+vsoxseg8ei16.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg8ei16.v v24, (a0), v4, v0.t
# CHECK-ENCODING: [0x27,0x5c,0x45,0xec]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 5c 45 ec <unknown>
-vsxseg8ei32.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg8ei32.v v24, (a0), v4, v0.t
+vsoxseg8ei16.v v24, (a0), v4
+# CHECK-INST: vsoxseg8ei16.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x5c,0x45,0xee]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 5c 45 ee <unknown>
+
+vsoxseg8ei32.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg8ei32.v v24, (a0), v4, v0.t
# CHECK-ENCODING: [0x27,0x6c,0x45,0xec]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 6c 45 ec <unknown>
-vsxseg8ei64.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg8ei64.v v24, (a0), v4, v0.t
+vsoxseg8ei32.v v24, (a0), v4
+# CHECK-INST: vsoxseg8ei32.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x6c,0x45,0xee]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 6c 45 ee <unknown>
+
+vsoxseg8ei64.v v24, (a0), v4, v0.t
+# CHECK-INST: vsoxseg8ei64.v v24, (a0), v4, v0.t
# CHECK-ENCODING: [0x27,0x7c,0x45,0xec]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
# CHECK-UNKNOWN: 27 7c 45 ec <unknown>
-vsxseg8ei128.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg8ei128.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x0c,0x45,0xfc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 0c 45 fc <unknown>
-
-vsxseg8ei256.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg8ei256.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x5c,0x45,0xfc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 5c 45 fc <unknown>
-
-vsxseg8ei512.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg8ei512.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x6c,0x45,0xfc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 6c 45 fc <unknown>
-
-vsxseg8ei1024.v v24, (a0), v4, v0.t
-# CHECK-INST: vsxseg8ei1024.v v24, (a0), v4, v0.t
-# CHECK-ENCODING: [0x27,0x7c,0x45,0xfc]
-# CHECK-ERROR: instruction requires the following: 'Zvlsseg' (Vector segment load/store instructions)
-# CHECK-UNKNOWN: 27 7c 45 fc <unknown>
\ No newline at end of file
+vsoxseg8ei64.v v24, (a0), v4
+# CHECK-INST: vsoxseg8ei64.v v24, (a0), v4
+# CHECK-ENCODING: [0x27,0x7c,0x45,0xee]
+# CHECK-ERROR: instruction requires the following: 'Zvlsseg'
+# CHECK-UNKNOWN: 27 7c 45 ee <unknown>
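
For anyone cross-checking the CHECK-ENCODING byte lists above by hand, the following is a minimal sketch (not part of the patch) that packs the store-FP fields of the V extension v1.0 encoding -- nf, mew, mop, vm, vs2, rs1, width, vs3 -- into a 32-bit word and reproduces the little-endian bytes for vsoxseg8ei64.v v24, (a0), v4, v0.t. The helper name and structure are illustrative only; field positions follow the published V-extension layout, with mop=0b11 for ordered-indexed stores and nf holding the segment count minus one.

def encode_rvv_store(nf, mew, mop, vm, vs2, rs1, width, vs3, opcode=0b0100111):
    """Pack RVV store-FP fields into a 32-bit word and return little-endian bytes."""
    word = (nf << 29) | (mew << 28) | (mop << 26) | (vm << 25) | (vs2 << 20) \
         | (rs1 << 15) | (width << 12) | (vs3 << 7) | opcode
    return [(word >> (8 * i)) & 0xFF for i in range(4)]  # byte order as in CHECK-ENCODING

# vsoxseg8ei64.v v24, (a0), v4, v0.t
# nf=7 (8 fields), mop=0b11 (indexed-ordered), vm=0 (masked), width=0b111 (EEW=64), a0=x10
print([hex(b) for b in encode_rvv_store(nf=7, mew=0, mop=0b11, vm=0,
                                        vs2=4, rs1=10, width=0b111, vs3=24)])
# -> ['0x27', '0x7c', '0x45', '0xec'], matching the CHECK-ENCODING above
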