[llvm-branch-commits] [llvm] 95795e7 - [RISCV] Define vsll/vsrl/vsra intrinsics.
Hsiangkai Wang via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Dec 15 14:36:07 PST 2020
Author: Hsiangkai Wang
Date: 2020-12-16T06:31:47+08:00
New Revision: 95795e7a65a7307065d8f6a030ba56d713a77d9a
URL: https://github.com/llvm/llvm-project/commit/95795e7a65a7307065d8f6a030ba56d713a77d9a
DIFF: https://github.com/llvm/llvm-project/commit/95795e7a65a7307065d8f6a030ba56d713a77d9a.diff
LOG: [RISCV] Define vsll/vsrl/vsra intrinsics.
Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Hsiangkai Wang <kai.wang at sifive.com>
Differential Revision: https://reviews.llvm.org/D93193
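For reference, each of these intrinsics comes in an unmasked and a masked
variant, and the final operand carries the vector length (i32 on riscv32, as
in the rv32 tests below; the rv64 variants use i64). A minimal unmasked use,
drawn from the declarations exercised by the new tests (the function name
@shift is illustrative only):

declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i8> @shift(<vscale x 1 x i8> %v, <vscale x 1 x i8> %amt, i32 %vl) {
  %r = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %v,
    <vscale x 1 x i8> %amt,
    i32 %vl)
  ret <vscale x 1 x i8> %r
}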
Added:
llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 6fca577f335a..7c648cd65513 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -181,4 +181,8 @@ let TargetPrefix = "riscv" in {
defm vmsbc_borrow_in : RISCVBinaryMaskOutWithV0;
defm vmsbc : RISCVBinaryMaskOut;
+ defm vsll : RISCVBinaryAAX;
+ defm vsrl : RISCVBinaryAAX;
+ defm vsra : RISCVBinaryAAX;
+
} // TargetPrefix = "riscv"
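Each RISCVBinaryAAX defm above is expected to expand to an unmasked and a
masked intrinsic, overloaded on the shift-amount operand (vector or scalar;
the .vi form is selected when the scalar operand is a suitable 5-bit
constant, as the vi tests below show). The tests accordingly declare pairs
such as:

declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, i32);
declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32);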
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 63de04c47479..9e685c818c81 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -714,11 +714,11 @@ multiclass VPatBinaryV_I<string intrinsic, string instruction> {
}
multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
- list<VTypeInfo> vtilist>
+ list<VTypeInfo> vtilist, Operand ImmType = simm5>
{
defm "" : VPatBinaryV_VV<intrinsic, instruction, vtilist>;
defm "" : VPatBinaryV_VX<intrinsic, instruction, vtilist>;
- defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;
+ defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
}
multiclass VPatBinaryV_VV_VX<string intrinsic, string instruction,
@@ -933,6 +933,13 @@ defm PseudoVSBC : VPseudoBinaryV_VM_XM;
defm PseudoVMSBC : VPseudoBinaryM_VM_XM<"@earlyclobber $rd">;
defm PseudoVMSBC : VPseudoBinaryM_V_X<"@earlyclobber $rd">;
+//===----------------------------------------------------------------------===//
+// 12.6. Vector Single-Width Bit Shift Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVSLL : VPseudoBinaryV_VV_VX_VI<uimm5>;
+defm PseudoVSRL : VPseudoBinaryV_VV_VX_VI<uimm5>;
+defm PseudoVSRA : VPseudoBinaryV_VV_VX_VI<uimm5>;
+
//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//
@@ -974,4 +981,14 @@ defm "" : VPatBinaryV_VM_XM<"int_riscv_vsbc", "PseudoVSBC">;
defm "" : VPatBinaryM_VM_XM<"int_riscv_vmsbc_borrow_in", "PseudoVMSBC">;
defm "" : VPatBinaryM_V_X<"int_riscv_vmsbc", "PseudoVMSBC">;
+//===----------------------------------------------------------------------===//
+// 12.6. Vector Single-Width Bit Shift Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors,
+ uimm5>;
+defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
+ uimm5>;
+defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
+ uimm5>;
+
} // Predicates = [HasStdExtV]
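A note on the new ImmType parameter: the vector shift instructions encode
their immediate as an unsigned 5-bit value (uimm5), whereas the other binary
ops matched through VPatBinaryV_VV_VX_VI take a signed simm5, so the
multiclass is parameterized with simm5 as the default. A sketch of the
intended usage (illustrative only; the vadd line mirrors existing users that
keep the default):

defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors>;
defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsll", "PseudoVSLL", AllIntegerVectors,
                               uimm5>;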
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
new file mode 100644
index 000000000000..67f18ec9378d
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
@@ -0,0 +1,1945 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
new file mode 100644
index 000000000000..bbf799fb8d5b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
@@ -0,0 +1,2377 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
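+
+; Each vsll intrinsic below is tested in an unmasked and a masked form.
+; The unmasked form takes the source operands plus the vector length
+; (i64 on riscv64); the masked form additionally takes a merge (maskedoff)
+; operand and a <vscale x N x i1> mask, and its CHECK lines expect the
+; trailing ", v0.t" mask annotation on the emitted instruction.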
+declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsll.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
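+; Vector-scalar (vx) forms follow: the shift amount is a scalar value in a
+; GPR rather than a vector, so the CHECK lines expect an {{a[0-9]+}} source
+; operand on vsll.vx.
+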
+declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ i64,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsll.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
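+; The tests below exercise the vector-immediate form (vsll.vi). RVV encodes
+; the shift amount as a 5-bit unsigned immediate (uimm5), so the constant 9
+; used throughout is one representative in-range value.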
+define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsll.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
new file mode 100644
index 000000000000..24697ab6b8e7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
@@ -0,0 +1,1945 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
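+; Tests for the llvm.riscv.vsra intrinsics (vector arithmetic right shift)
+; on RV32. Each unmasked test has a masked counterpart that takes a passthru
+; vector, a mask, and the vector length, and whose CHECK line expects the
+; v0.t suffix.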
+declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
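+; The tests below cover the vector-scalar form (vsra.vx), where the shift
+; amount is taken from a scalar register instead of a second vector operand.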
+declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
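+; The vsra.vi tests below reuse the scalar-operand intrinsic declarations
+; with the constant shift amount 9 (which fits in uimm5), so instruction
+; selection emits the immediate form vsra.vi rather than vsra.vx.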
+define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
new file mode 100644
index 000000000000..5a2d9e6935a8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
@@ -0,0 +1,2377 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
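+; This RV64 variant mirrors vsra-rv32.ll, but the VL operand and scalar
+; shift amount are i64, and it additionally covers 64-bit element types
+; (beginning with nxv1i64), which the RV32 file does not exercise.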
+declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsra.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsra_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsra_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsra_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ i64,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsra_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsra.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsra.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
new file mode 100644
index 000000000000..514c3d31dba1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
@@ -0,0 +1,1945 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i32 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i32 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i32 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i32 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i32 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i32 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
new file mode 100644
index 000000000000..9598eed618b0
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
@@ -0,0 +1,2377 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
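+
+; This file exercises the llvm.riscv.vsrl (logical shift right) intrinsics
+; for every integer element type from nxv1i8 through nxv8i64, in unmasked
+; and masked forms, with vector (vsrl.vv), scalar (vsrl.vx), and immediate
+; (vsrl.vi) shift-amount operands. The trailing i64 argument of each
+; intrinsic is the vector length, which the CHECK lines expect to reach
+; vsetvli in a GPR.
+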
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ <vscale x 64 x i8> %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ <vscale x 32 x i16> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ <vscale x 16 x i32> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ <vscale x 2 x i64> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ <vscale x 4 x i64> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsrl.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ <vscale x 8 x i64> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
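+; The .vx variants below take the shift amount as a scalar: the IR-level
+; operand has the element type (i8 through i64), and the CHECK lines expect
+; vsrl.vx with a GPR ({{a[0-9]+}}) as the final operand.
+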
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ i8,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i8,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ i8,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i8,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ i8,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i8,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ i8,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i8,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ i8,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i8,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ i8,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i8,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ i8,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 %1,
+ i64 %2)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+ <vscale x 64 x i8>,
+ <vscale x 64 x i8>,
+ i8,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 %2,
+ <vscale x 64 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 64 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ i16,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i16,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ i16,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i16,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ i16,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i16,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ i16,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i16,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ i16,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i16,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ i16,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 %1,
+ i64 %2)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i16>,
+ i16,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ i32,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i32,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ i32,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i32,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ i32,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i32,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ i32,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i32,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ i32,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 %1,
+ i64 %2)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i32>,
+ i32,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ i64,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsrl_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i64>,
+ i64,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ i64,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsrl_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i64>,
+ i64,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ i64,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsrl_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i64>,
+ i64,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ i64,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsrl_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 %1,
+ i64 %2)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i64>,
+ i64,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsrl.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.i8(
+ <vscale x 1 x i8> %0,
+ <vscale x 1 x i8> %1,
+ i8 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.i8(
+ <vscale x 2 x i8> %0,
+ <vscale x 2 x i8> %1,
+ i8 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.i8(
+ <vscale x 4 x i8> %0,
+ <vscale x 4 x i8> %1,
+ i8 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.i8(
+ <vscale x 8 x i8> %0,
+ <vscale x 8 x i8> %1,
+ i8 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.i8(
+ <vscale x 16 x i8> %0,
+ <vscale x 16 x i8> %1,
+ i8 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.i8(
+ <vscale x 32 x i8> %0,
+ <vscale x 32 x i8> %1,
+ i8 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ i8 9,
+ i64 %1)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.i8(
+ <vscale x 64 x i8> %0,
+ <vscale x 64 x i8> %1,
+ i8 9,
+ <vscale x 64 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.i16(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i16> %1,
+ i16 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.i16(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i16> %1,
+ i16 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.i16(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i16> %1,
+ i16 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.i16(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i16> %1,
+ i16 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.i16(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i16> %1,
+ i16 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ i16 9,
+ i64 %1)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.i16(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i16> %1,
+ i16 9,
+ <vscale x 32 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.i32(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i32> %1,
+ i32 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.i32(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i32> %1,
+ i32 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.i32(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i32> %1,
+ i32 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.i32(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i32> %1,
+ i32 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ i32 9,
+ i64 %1)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.i32(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i32> %1,
+ i32 9,
+ <vscale x 16 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.i64(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i64> %1,
+ i64 9,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i64> %1,
+ i64 9,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i64> %1,
+ i64 9,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ i64 9,
+ i64 %1)
+
+ ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsrl.vi {{v[0-9]+}}, {{v[0-9]+}}, 9, v0.t
+ %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.i64(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i64> %1,
+ i64 9,
+ <vscale x 8 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
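
As a usage sketch (the function name below is illustrative, not taken from the patch): each unmasked intrinsic takes the source vector, a shift amount, and a trailing VL operand, and the selection patterns added in this change pick the .vx or .vi encoding depending on the shift operand. The first call below has a register shift amount and would lower to vsrl.vx; the second passes a constant that fits in uimm5 and would lower to vsrl.vi. The declaration matches the nxv2i32 form exercised by the tests above.

declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  i64);

; Hypothetical caller, not part of the test files.
define <vscale x 2 x i32> @example_vsrl_chain(<vscale x 2 x i32> %v, i32 %amt, i64 %vl) nounwind {
entry:
  ; Variable shift amount: selected as vsrl.vx.
  %t = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
    <vscale x 2 x i32> %v,
    i32 %amt,
    i64 %vl)
  ; Constant shift amount in uimm5 range: selected as vsrl.vi.
  %r = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.i32(
    <vscale x 2 x i32> %t,
    i32 3,
    i64 %vl)
  ret <vscale x 2 x i32> %r
}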