[llvm-branch-commits] [llvm] 912740a - [RISCV] Add intrinsics for vrgather instruction
ShihPo Hung via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Thu Dec 24 18:28:36 PST 2020
Author: ShihPo Hung
Date: 2020-12-24T18:16:02-08:00
New Revision: 912740a864feeac37064844a8cb4743582aec558
URL: https://github.com/llvm/llvm-project/commit/912740a864feeac37064844a8cb4743582aec558
DIFF: https://github.com/llvm/llvm-project/commit/912740a864feeac37064844a8cb4743582aec558.diff
LOG: [RISCV] Add intrinsics for vrgather instruction
This patch defines the vrgather intrinsics and lowers them to V instructions.
We worked with @rogfer01 from BSC to produce this patch.
Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: ShihPo Hung <shihpo.hung at sifive.com>
Differential Revision: https://reviews.llvm.org/D93797
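For reference, the unmasked and masked vector-vector forms are called from IR
as in the added tests (the %src, %index, %maskedoff, %mask, and %vl value
names below are illustrative):

  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.nxv1i8(
         <vscale x 1 x i8> %src,
         <vscale x 1 x i8> %index,
         i32 %vl)

  %b = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
         <vscale x 1 x i8> %maskedoff,
         <vscale x 1 x i8> %src,
         <vscale x 1 x i8> %index,
         <vscale x 1 x i1> %mask,
         i32 %vl)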
Added:
llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index e6af2374a500..cb335e739266 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -608,6 +608,8 @@ let TargetPrefix = "riscv" in {
defm vfslide1up : RISCVBinaryAAX;
defm vfslide1down : RISCVBinaryAAX;
+ defm vrgather : RISCVBinaryAAX;
+
defm vaaddu : RISCVSaturatingBinaryAAX;
defm vaadd : RISCVSaturatingBinaryAAX;
defm vasubu : RISCVSaturatingBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index ef1003dee7af..713a289badc2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -178,6 +178,16 @@ defset list<VTypeInfo> AllVectors = {
}
}
+// This functor is used to obtain the integer vector type that has the same
+// SEW and LMUL (register multiplier) as the input parameter type.
+class GetIntVTypeInfo<VTypeInfo vti>
+{
+ // Equivalent integer vector type. E.g.
+ // VI8M1 → VI8M1 (identity)
+ // VF64M4 → VI64M4
+ VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
+}
+
class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
{
VTypeInfo Vti = vti;
@@ -671,9 +681,9 @@ multiclass VPseudoBinary<VReg RetClass,
}
}
-multiclass VPseudoBinaryV_VV {
+multiclass VPseudoBinaryV_VV<string Constraint = ""> {
foreach m = MxList.m in
- defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m>;
+ defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint>;
}
multiclass VPseudoBinaryV_VX<bit IsFloat, string Constraint = ""> {
@@ -682,9 +692,9 @@ multiclass VPseudoBinaryV_VX<bit IsFloat, string Constraint = ""> {
!if(IsFloat, FPR32, GPR), m, Constraint>;
}
-multiclass VPseudoBinaryV_VI<Operand ImmType = simm5> {
+multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
foreach m = MxList.m in
- defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m>;
+ defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
// We use earlyclobber here due to
@@ -811,10 +821,10 @@ multiclass VPseudoBinaryM_VI {
defm _VI : VPseudoBinary<VR, m.vrclass, simm5, m, "@earlyclobber $rd">;
}
-multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5> {
- defm "" : VPseudoBinaryV_VV;
- defm "" : VPseudoBinaryV_VX</*IsFloat=*/0>;
- defm "" : VPseudoBinaryV_VI<ImmType>;
+multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
+ defm "" : VPseudoBinaryV_VV<Constraint>;
+ defm "" : VPseudoBinaryV_VX</*IsFloat=*/false, Constraint>;
+ defm "" : VPseudoBinaryV_VI<ImmType, Constraint>;
}
multiclass VPseudoBinaryV_VV_VX<bit IsFloat = 0> {
@@ -1248,6 +1258,17 @@ multiclass VPatBinaryV_VV<string intrinsic, string instruction,
vti.RegClass, vti.RegClass>;
}
+multiclass VPatBinaryV_VV_INT<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist> {
+ foreach vti = vtilist in {
+ defvar ivti = GetIntVTypeInfo<vti>.Vti;
+ defm : VPatBinary<intrinsic, instruction, "VV",
+ vti.Vector, vti.Vector, ivti.Vector, vti.Mask,
+ vti.SEW, vti.LMul, vti.RegClass,
+ vti.RegClass, vti.RegClass>;
+ }
+}
+
multiclass VPatBinaryV_VX<string intrinsic, string instruction,
list<VTypeInfo> vtilist> {
foreach vti = vtilist in
@@ -1258,6 +1279,15 @@ multiclass VPatBinaryV_VX<string intrinsic, string instruction,
vti.RegClass, vti.ScalarRegClass>;
}
+multiclass VPatBinaryV_VX_INT<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist> {
+ foreach vti = vtilist in
+ defm : VPatBinary<intrinsic, instruction, "VX",
+ vti.Vector, vti.Vector, XLenVT, vti.Mask,
+ vti.SEW, vti.LMul, vti.RegClass,
+ vti.RegClass, GPR>;
+}
+
multiclass VPatBinaryV_VI<string intrinsic, string instruction,
list<VTypeInfo> vtilist, Operand imm_type> {
foreach vti = vtilist in
@@ -1644,6 +1674,14 @@ multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
defm "" : VPatBinaryM_VI<intrinsic, instruction, vtilist>;
}
+multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
+ list<VTypeInfo> vtilist, Operand ImmType = simm5>
+{
+ defm "" : VPatBinaryV_VV_INT<intrinsic, instruction, vtilist>;
+ defm "" : VPatBinaryV_VX_INT<intrinsic, instruction, vtilist>;
+ defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
+}
+
multiclass VPatReductionV_VS<string intrinsic, string instruction, bit IsFloat = 0> {
foreach vti = !if(IsFloat, NoGroupFloatVectors, NoGroupIntegerVectors) in
{
@@ -2079,6 +2117,11 @@ let Predicates = [HasStdExtV, HasStdExtF] in {
defm PseudoVFSLIDE1DOWN : VPseudoBinaryV_VX</*IsFloat*/true>;
} // Predicates = [HasStdExtV, HasStdExtF]
+//===----------------------------------------------------------------------===//
+// 17.4. Vector Register Gather Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVRGATHER : VPseudoBinaryV_VV_VX_VI<uimm5, "@earlyclobber $rd">;
+
//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//
@@ -2536,5 +2579,18 @@ let Predicates = [HasStdExtV, HasStdExtF] in {
defm "" : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN", AllFloatVectors>;
} // Predicates = [HasStdExtV, HasStdExtF]
+//===----------------------------------------------------------------------===//
+// 17.4. Vector Register Gather Instructions
+//===----------------------------------------------------------------------===//
+let Predicates = [HasStdExtV] in {
+ defm "" : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+ AllIntegerVectors, uimm5>;
+} // Predicates = [HasStdExtV]
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+ defm "" : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER",
+ AllFloatVectors, uimm5>;
+} // Predicates = [HasStdExtV, HasStdExtF]
+
// Include the non-intrinsic ISel patterns
include "RISCVInstrInfoVSDPatterns.td"
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
new file mode 100644
index 000000000000..cbe399a77c56
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv32.ll
@@ -0,0 +1,3624 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i32);
+
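+; At LMUL=8 the two m8 vector operands cannot both be passed in registers, so
+; the second operand arrives through memory; the CHECK lines expect it to be
+; reloaded with vle8.v before the masked gather.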
+define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
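+; The floating-point tests below reuse the same integer index operand
+; (i16/i32): vrgather indices are always integral, so only the element type
+; of the gathered data changes.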
+declare <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i16(
+ <vscale x 1 x half>,
+ i16,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i16(
+ <vscale x 1 x half> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i16(
+ <vscale x 2 x half>,
+ i16,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i16(
+ <vscale x 2 x half> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i16(
+ <vscale x 4 x half>,
+ i16,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i16(
+ <vscale x 4 x half> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i16(
+ <vscale x 8 x half>,
+ i16,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i16(
+ <vscale x 8 x half> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i16(
+ <vscale x 16 x half>,
+ i16,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i16(
+ <vscale x 16 x half> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i16(
+ <vscale x 32 x half>,
+ i16,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, i16 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i16(
+ <vscale x 32 x half> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>,
+ i16,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i32(
+ <vscale x 1 x float>,
+ i32,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i32(
+ <vscale x 1 x float> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i32(
+ <vscale x 2 x float>,
+ i32,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i32(
+ <vscale x 2 x float> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i32(
+ <vscale x 4 x float>,
+ i32,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i32(
+ <vscale x 4 x float> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i32(
+ <vscale x 8 x float>,
+ i32,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i32(
+ <vscale x 8 x float> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i32(
+ <vscale x 16 x float>,
+ i32,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, i32 %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i32(
+ <vscale x 16 x float> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ i32,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
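+; The remaining tests exercise the immediate form: calling the intrinsic with
+; a constant index (9 here) is expected to select vrgather.vi.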
+define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
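+; As in the vector-scalar tests, the masked LMUL=8 immediate test reloads its
+; second operand with vle8.v before issuing vrgather.vi.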
+define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i16(
+ <vscale x 1 x half> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i16(
+ <vscale x 2 x half> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i16(
+ <vscale x 4 x half> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i16(
+ <vscale x 8 x half> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i16(
+ <vscale x 16 x half> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i16(
+ <vscale x 32 x half> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 32 x half> %a
+}
+
+define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x half> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i32(
+ <vscale x 1 x float> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i32(
+ <vscale x 2 x float> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i32(
+ <vscale x 4 x float> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i32(
+ <vscale x 8 x float> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i32(
+ <vscale x 16 x float> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
new file mode 100644
index 000000000000..c9c0a7221433
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgather-rv64.ll
@@ -0,0 +1,4630 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
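+; Each vrgather intrinsic below is exercised in an unmasked and a masked form.
+; The unmasked operands are (source vector, index vector, i64 vl); the masked
+; form prepends a merge operand and inserts the mask before the vl.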
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
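+; vrgather's destination may not overlap its source or index operands (modeled
+; in this patch with an earlyclobber constraint on the pseudo), so the result
+; is built in a scratch register (v25) and then copied to the return register.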
+define <vscale x 1 x i8> @intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
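+; In the masked form the merge operand already occupies the destination
+; register, so vrgather.vv writes v16 in place under the v0 mask with no
+; extra register copy.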
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i8_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i8_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i8_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i8_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i8_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i64);
+
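+; The m8 vv cases follow the same indirect-operand pattern: the index vector
+; is reloaded from memory at VLMAX, and the gather result is staged in v8
+; before the vmv8r.v copy to the return register.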
+define <vscale x 64 x i8> @intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv64i8_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a1)
+; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i16_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i16_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i16_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i16_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i16_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32i16_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i32_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i32_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i32_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i32_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16i32_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrgather.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i64_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2i64_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4i64_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrgather.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8i64_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f16_nxv1f16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.nxv1i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f16_nxv2f16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.nxv2i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f16_nxv4f16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.nxv4i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f16_nxv8f16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.nxv8i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f16_nxv16f16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.nxv16i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv32f16_nxv32f16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a1)
+; CHECK-NEXT: vle16.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.nxv32i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f32_nxv1f32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.nxv1i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f32_nxv2f32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.nxv2i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f32_nxv4f32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.nxv4i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
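+; In the masked LMUL=4/8 cases below, the operands no longer all fit in the
+; vector argument registers starting at v16, so the extra operand is passed
+; indirectly through a GPR pointer and reloaded with a vle before the
+; vrgather.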
+declare <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f32_nxv8f32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.nxv8i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv16f32_nxv16f32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a1)
+; CHECK-NEXT: vle32.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.nxv16i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v25, v16, v17
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1f64_nxv1f64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.nxv1i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v26, v16, v18
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv2f64_nxv2f64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v18, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.nxv2i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v28, v16, v20
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv4f64_nxv4f64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
+; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v20, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.nxv4i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vv_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v8, v16, v24
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv8f64_nxv8f64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vv v16, v24, v8, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.nxv8i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x double> %a
+}
+
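+; vrgather.vx cases follow: the index is a scalar GPR, so every destination
+; element reads the same source element (effectively a splat of vs2[rs1],
+; or zero when the index is out of range).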
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
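+; On RV64 an i64 scalar index fits in a single GPR and is passed directly
+; in a0.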
+declare <vscale x 1 x i64> @llvm.riscv.vrgather.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2i64_nxv2i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2i64_nxv2i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4i64_nxv4i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4i64_nxv4i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ i64,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8i64_nxv8i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vrgather.mask.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8i64_nxv8i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
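+; Floating-point element types reuse the integer-indexed form: the scalar
+; index remains an integer (i16/i32/i64) matching the element width.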
+declare <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i16(
+ <vscale x 1 x half>,
+ i16,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f16_nxv1f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i16(
+ <vscale x 1 x half> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i16(
+ <vscale x 1 x half>,
+ <vscale x 1 x half>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x half> @intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f16_nxv1f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i16(
+ <vscale x 2 x half>,
+ i16,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f16_nxv2f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i16(
+ <vscale x 2 x half> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i16(
+ <vscale x 2 x half>,
+ <vscale x 2 x half>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x half> @intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f16_nxv2f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i16(
+ <vscale x 4 x half>,
+ i16,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f16_nxv4f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i16(
+ <vscale x 4 x half> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i16(
+ <vscale x 4 x half>,
+ <vscale x 4 x half>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x half> @intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f16_nxv4f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i16(
+ <vscale x 8 x half>,
+ i16,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f16_nxv8f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i16(
+ <vscale x 8 x half> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i16(
+ <vscale x 8 x half>,
+ <vscale x 8 x half>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x half> @intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f16_nxv8f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i16(
+ <vscale x 16 x half>,
+ i16,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f16_nxv16f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i16(
+ <vscale x 16 x half> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i16(
+ <vscale x 16 x half>,
+ <vscale x 16 x half>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x half> @intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f16_nxv16f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i16(
+ <vscale x 32 x half>,
+ i16,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, i16 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv32f16_nxv32f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i16(
+ <vscale x 32 x half> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i16(
+ <vscale x 32 x half>,
+ <vscale x 32 x half>,
+ i16,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x half> @intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv32f16_nxv32f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i32(
+ <vscale x 1 x float>,
+ i32,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f32_nxv1f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i32(
+ <vscale x 1 x float> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
+ <vscale x 1 x float>,
+ <vscale x 1 x float>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x float> @intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f32_nxv1f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i32(
+ <vscale x 2 x float>,
+ i32,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f32_nxv2f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i32(
+ <vscale x 2 x float> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
+ <vscale x 2 x float>,
+ <vscale x 2 x float>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x float> @intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f32_nxv2f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i32(
+ <vscale x 4 x float>,
+ i32,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f32_nxv4f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i32(
+ <vscale x 4 x float> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
+ <vscale x 4 x float>,
+ <vscale x 4 x float>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x float> @intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f32_nxv4f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i32(
+ <vscale x 8 x float>,
+ i32,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f32_nxv8f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i32(
+ <vscale x 8 x float> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
+ <vscale x 8 x float>,
+ <vscale x 8 x float>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x float> @intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f32_nxv8f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i32(
+ <vscale x 16 x float>,
+ i32,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, i32 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv16f32_nxv16f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i32(
+ <vscale x 16 x float> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i32(
+ <vscale x 16 x float>,
+ <vscale x 16 x float>,
+ i32,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x float> @intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv16f32_nxv16f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.i64(
+ <vscale x 1 x double>,
+ i64,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv1f64_nxv1f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v25, v16, a0
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.i64(
+ <vscale x 1 x double> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.i64(
+ <vscale x 1 x double>,
+ <vscale x 1 x double>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x double> @intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv1f64_nxv1f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v17, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.i64(
+ <vscale x 2 x double>,
+ i64,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv2f64_nxv2f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v26, v16, a0
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.i64(
+ <vscale x 2 x double> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.i64(
+ <vscale x 2 x double>,
+ <vscale x 2 x double>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x double> @intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv2f64_nxv2f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v18, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.i64(
+ <vscale x 4 x double>,
+ i64,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv4f64_nxv4f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v28, v16, a0
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.i64(
+ <vscale x 4 x double> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i64(
+ <vscale x 4 x double>,
+ <vscale x 4 x double>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x double> @intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv4f64_nxv4f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v20, a0, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.i64(
+ <vscale x 8 x double>,
+ i64,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64(<vscale x 8 x double> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vx_nxv8f64_nxv8f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v8, v16, a0
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.i64(
+ <vscale x 8 x double> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.i64(
+ <vscale x 8 x double>,
+ <vscale x 8 x double>,
+ i64,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x double> @intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vx_nxv8f64_nxv8f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vx v16, v8, a1, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double> %1,
+ i64 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x double> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i8_nxv1i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i8_nxv2i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i8> @llvm.riscv.vrgather.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i8_nxv4i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i8> @llvm.riscv.vrgather.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i8_nxv8i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i8> @llvm.riscv.vrgather.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i8_nxv16i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i8> @llvm.riscv.vrgather.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i8_nxv32i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i8> @llvm.riscv.vrgather.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv64i8_nxv64i8_i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e8,m8,ta,mu
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e8,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 64 x i8> @llvm.riscv.vrgather.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i16_nxv1i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vrgather.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i16_nxv2i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vrgather.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i16_nxv4i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vrgather.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i16_nxv8i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vrgather.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i16_nxv16i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vrgather.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32i16_nxv32i16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vrgather.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i32_nxv1i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vrgather.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i32_nxv2i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vrgather.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i32_nxv4i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vrgather.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i32_nxv8i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vrgather.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16i32_nxv16i32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vrgather.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1i64_nxv1i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vrgather.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2i64_nxv2i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2i64_nxv2i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vrgather.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4i64_nxv4i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4i64_nxv4i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vrgather.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8i64_nxv8i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8i64_nxv8i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vrgather.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f16_nxv1f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.nxv1f16.i16(
+ <vscale x 1 x half> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f16_nxv1f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x half> @llvm.riscv.vrgather.mask.nxv1f16.i16(
+ <vscale x 1 x half> %0,
+ <vscale x 1 x half> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f16_nxv2f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.nxv2f16.i16(
+ <vscale x 2 x half> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f16_nxv2f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x half> @llvm.riscv.vrgather.mask.nxv2f16.i16(
+ <vscale x 2 x half> %0,
+ <vscale x 2 x half> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f16_nxv4f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.nxv4f16.i16(
+ <vscale x 4 x half> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f16_nxv4f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x half> @llvm.riscv.vrgather.mask.nxv4f16.i16(
+ <vscale x 4 x half> %0,
+ <vscale x 4 x half> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f16_nxv8f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.nxv8f16.i16(
+ <vscale x 8 x half> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f16_nxv8f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x half> @llvm.riscv.vrgather.mask.nxv8f16.i16(
+ <vscale x 8 x half> %0,
+ <vscale x 8 x half> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f16_nxv16f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.nxv16f16.i16(
+ <vscale x 16 x half> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f16_nxv16f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x half> @llvm.riscv.vrgather.mask.nxv16f16.i16(
+ <vscale x 16 x half> %0,
+ <vscale x 16 x half> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x half> %a
+}
+
+define <vscale x 32 x half> @intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv32f16_nxv32f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.nxv32f16.i16(
+ <vscale x 32 x half> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 32 x half> %a
+}
+
+define <vscale x 32 x half> @intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv32f16_nxv32f16_i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e16,m8,ta,mu
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e16,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x half> @llvm.riscv.vrgather.mask.nxv32f16.i16(
+ <vscale x 32 x half> %0,
+ <vscale x 32 x half> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x half> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f32_nxv1f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.nxv1f32.i32(
+ <vscale x 1 x float> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f32_nxv1f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x float> @llvm.riscv.vrgather.mask.nxv1f32.i32(
+ <vscale x 1 x float> %0,
+ <vscale x 1 x float> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f32_nxv2f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.nxv2f32.i32(
+ <vscale x 2 x float> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f32_nxv2f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x float> @llvm.riscv.vrgather.mask.nxv2f32.i32(
+ <vscale x 2 x float> %0,
+ <vscale x 2 x float> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f32_nxv4f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.nxv4f32.i32(
+ <vscale x 4 x float> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f32_nxv4f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x float> @llvm.riscv.vrgather.mask.nxv4f32.i32(
+ <vscale x 4 x float> %0,
+ <vscale x 4 x float> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f32_nxv8f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.nxv8f32.i32(
+ <vscale x 8 x float> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f32_nxv8f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x float> @llvm.riscv.vrgather.mask.nxv8f32.i32(
+ <vscale x 8 x float> %0,
+ <vscale x 8 x float> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv16f32_nxv16f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.nxv16f32.i32(
+ <vscale x 16 x float> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv16f32_nxv16f32_i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e32,m8,ta,mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e32,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x float> @llvm.riscv.vrgather.mask.nxv16f32.i32(
+ <vscale x 16 x float> %0,
+ <vscale x 16 x float> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv1f64_nxv1f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v25, v16, 9
+; CHECK-NEXT: vmv1r.v v16, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vrgather.nxv1f64.i64(
+ <vscale x 1 x double> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x double> %a
+}
+
+define <vscale x 1 x double> @intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv1f64_nxv1f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v17, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x double> @llvm.riscv.vrgather.mask.nxv1f64.i64(
+ <vscale x 1 x double> %0,
+ <vscale x 1 x double> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv2f64_nxv2f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v26, v16, 9
+; CHECK-NEXT: vmv2r.v v16, v26
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vrgather.nxv2f64.i64(
+ <vscale x 2 x double> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x double> %a
+}
+
+define <vscale x 2 x double> @intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv2f64_nxv2f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v18, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x double> @llvm.riscv.vrgather.mask.nxv2f64.i64(
+ <vscale x 2 x double> %0,
+ <vscale x 2 x double> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv4f64_nxv4f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v28, v16, 9
+; CHECK-NEXT: vmv4r.v v16, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vrgather.nxv4f64.i64(
+ <vscale x 4 x double> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x double> %a
+}
+
+define <vscale x 4 x double> @intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv4f64_nxv4f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v20, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x double> @llvm.riscv.vrgather.mask.nxv4f64.i64(
+ <vscale x 4 x double> %0,
+ <vscale x 4 x double> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64(<vscale x 8 x double> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_vi_nxv8f64_nxv8f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v8, v16, 9
+; CHECK-NEXT: vmv8r.v v16, v8
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vrgather.nxv8f64.i64(
+ <vscale x 8 x double> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 8 x double> %a
+}
+
+define <vscale x 8 x double> @intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vrgather_mask_vi_nxv8f64_nxv8f64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT: vrgather.vi v16, v8, 9, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x double> @llvm.riscv.vrgather.mask.nxv8f64.i64(
+ <vscale x 8 x double> %0,
+ <vscale x 8 x double> %1,
+ i64 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x double> %a
+}