[llvm] 348c67e - [RISCV] Merge rv32/rv64 vector single-width shift intrinsic tests that have the same content. NFC.

Jim Lin via llvm-commits llvm-commits at lists.llvm.org
Sat Jul 15 22:12:41 PDT 2023


Author: Jim Lin
Date: 2023-07-16T13:09:10+08:00
New Revision: 348c67e25468defabd3aeeccd99590e3194a89a8

URL: https://github.com/llvm/llvm-project/commit/348c67e25468defabd3aeeccd99590e3194a89a8
DIFF: https://github.com/llvm/llvm-project/commit/348c67e25468defabd3aeeccd99590e3194a89a8.diff

LOG: [RISCV] Merge rv32/rv64 vector single-width shift intrinsic tests that have the same content. NFC.

Added: 
    llvm/test/CodeGen/RISCV/rvv/vsll.ll
    llvm/test/CodeGen/RISCV/rvv/vsra.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl.ll

Modified: 
    

Removed: 
    llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll

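For reference, the merged files follow the usual pattern for XLEN-agnostic RVV
tests: an iXLen placeholder in the IR is rewritten by sed in each RUN line, so
a single test body drives both the riscv32 and riscv64 targets. A minimal
sketch of what the merged llvm/test/CodeGen/RISCV/rvv/vsll.ll header looks
like under that scheme (line-continuation and check-prefix details are
illustrative, not copied from the commit):

    ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
    ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
    ; RUN:   -verify-machineinstrs | FileCheck %s
    ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
    ; RUN:   -verify-machineinstrs | FileCheck %s

    declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
      <vscale x 1 x i8>,
      <vscale x 1 x i8>,
      <vscale x 1 x i8>,
      iXLen);

Because the deleted rv32/rv64 files differed only in the scalar XLEN type
(i32 vs. i64, as in the vsll-rv64.ll diff below) and produced identical CHECK
lines, substituting iXLen lets one copy replace both with no change in what
is tested, hence the NFC tag.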

################################################################################
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
deleted file mode 100644
index 709e3f07273871..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv64.ll
+++ /dev/null
@@ -1,2833 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vsll.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsll.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vsll.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsll.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsll_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_1_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vadd.vv v8, v8, v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i64 1,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vadd.vv v8, v9, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 9,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 9,
-    <vscale x 64 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 9,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsll_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vsll.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vsll.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
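
The merged file below keeps a single copy of each test and writes the
XLen-sized scalar operands as an iXLen placeholder, which the new RUN
lines expand with sed before running llc. As a minimal sketch (concrete
file names stand in for lit's %s substitution), the riscv32 run is
equivalent to:

  sed 's/iXLen/i32/g' vsll.ll \
    | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
    | FileCheck vsll.ll

so a declaration written once as

  declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
    <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen);

is tested with i32 on riscv32 and with i64 on riscv64 from the same
source.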

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsll.ll
similarity index 84%
rename from llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsll.ll
index d08e8c9b31bfbc..7e19abfedb0cb0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsll-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsll.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1042,10 +1044,10 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsll_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1055,8 +1057,8 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1064,12 +1066,12 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1079,9 +1081,9 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1089,10 +1091,10 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsll_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1102,8 +1104,8 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1111,12 +1113,12 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsll_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1126,9 +1128,9 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1136,10 +1138,10 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsll_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1149,8 +1151,8 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1158,12 +1160,12 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsll_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1173,9 +1175,9 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1183,10 +1185,10 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsll_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1196,8 +1198,8 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1205,12 +1207,12 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsll_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1220,9 +1222,9 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1230,10 +1232,10 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsll_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1243,8 +1245,8 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1252,12 +1254,12 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32,
+  iXLen,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsll_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1267,9 +1269,9 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1277,10 +1279,10 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsll_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1290,8 +1292,8 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1299,12 +1301,12 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32,
+  iXLen,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsll_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1314,9 +1316,9 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1324,10 +1326,10 @@ entry:
 declare <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsll_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1337,8 +1339,8 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1346,12 +1348,12 @@ entry:
 declare <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32,
+  iXLen,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsll_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1361,9 +1363,9 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1371,10 +1373,10 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsll_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1384,8 +1386,8 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1393,12 +1395,12 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsll_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1408,9 +1410,9 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1418,10 +1420,10 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsll_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1431,8 +1433,8 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1440,12 +1442,12 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsll_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1455,9 +1457,9 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1465,10 +1467,10 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsll_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1478,8 +1480,8 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1487,12 +1489,12 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsll_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1502,9 +1504,9 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1512,10 +1514,10 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsll_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1525,8 +1527,8 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1534,12 +1536,12 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsll_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1549,9 +1551,9 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1559,10 +1561,10 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsll_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1572,8 +1574,8 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1581,12 +1583,12 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32,
+  iXLen,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsll_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1596,9 +1598,9 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1606,10 +1608,10 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsll_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1619,8 +1621,8 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1628,12 +1630,12 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32,
+  iXLen,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsll_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1643,9 +1645,9 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1653,10 +1655,10 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsll_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1666,8 +1668,8 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1675,12 +1677,12 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsll_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1690,9 +1692,9 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1700,10 +1702,10 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsll_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1713,8 +1715,8 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1722,12 +1724,12 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsll_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1737,9 +1739,9 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1747,10 +1749,10 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsll_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1760,8 +1762,8 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1769,12 +1771,12 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsll_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1784,9 +1786,9 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1794,10 +1796,10 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsll_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1807,8 +1809,8 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1816,12 +1818,12 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsll_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1831,9 +1833,9 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1841,10 +1843,10 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsll_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1854,8 +1856,8 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1863,12 +1865,12 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32,
+  iXLen,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsll_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1878,9 +1880,9 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1888,10 +1890,10 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsll_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1901,8 +1903,8 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1910,12 +1912,12 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsll_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1925,9 +1927,9 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1935,10 +1937,10 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsll_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1948,8 +1950,8 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1957,12 +1959,12 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsll_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1972,9 +1974,9 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1982,10 +1984,10 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsll_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1995,8 +1997,8 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2004,12 +2006,12 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsll_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2019,9 +2021,9 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2029,10 +2031,10 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsll_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -2042,8 +2044,8 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2051,12 +2053,12 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsll_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2066,14 +2068,14 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsll_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -2083,13 +2085,13 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vsll_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsll_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_1_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -2099,13 +2101,13 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
-    i32 1,
-    i32 %1)
+    iXLen 1,
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -2115,14 +2117,14 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -2132,14 +2134,14 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 1,
+    iXLen 1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -2149,13 +2151,13 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.nxv2i8(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -2165,14 +2167,14 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsll.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsll_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -2182,13 +2184,13 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.nxv4i8(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -2198,14 +2200,14 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsll.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsll_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -2215,13 +2217,13 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.nxv8i8(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -2231,14 +2233,14 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsll.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsll_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -2248,13 +2250,13 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.nxv16i8(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -2264,14 +2266,14 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsll.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsll_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2281,13 +2283,13 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.nxv32i8(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -2297,14 +2299,14 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsll.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsll_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -2314,13 +2316,13 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.nxv64i8(
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -2330,14 +2332,14 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsll.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 64 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsll_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2347,13 +2349,13 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.nxv1i16(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -2363,14 +2365,14 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsll.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsll_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2380,13 +2382,13 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.nxv2i16(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -2396,14 +2398,14 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsll.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsll_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2413,13 +2415,13 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.nxv4i16(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -2429,14 +2431,14 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsll.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsll_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2446,13 +2448,13 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.nxv8i16(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -2462,14 +2464,14 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsll.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsll_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2479,13 +2481,13 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.nxv16i16(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -2495,14 +2497,14 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsll.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsll_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -2512,13 +2514,13 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.nxv32i16(
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -2528,14 +2530,14 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsll.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsll_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2545,13 +2547,13 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.nxv1i32(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -2561,14 +2563,14 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsll.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsll_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2578,13 +2580,13 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.nxv2i32(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -2594,14 +2596,14 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsll.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsll_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2611,13 +2613,13 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.nxv4i32(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -2627,14 +2629,14 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsll.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsll_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2644,13 +2646,13 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.nxv8i32(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -2660,14 +2662,14 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsll.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsll_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -2677,13 +2679,13 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.nxv16i32(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -2693,14 +2695,14 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsll.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsll_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2710,13 +2712,13 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.nxv1i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -2726,14 +2728,14 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsll.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsll_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2743,13 +2745,13 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.nxv2i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -2759,14 +2761,14 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsll.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsll_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2776,13 +2778,13 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.nxv4i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -2792,14 +2794,14 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsll.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsll_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -2809,13 +2811,13 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.nxv8i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -2825,9 +2827,9 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsll.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
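
For context on the hunks above: the merged test files replace the hard-coded i32/i64 XLEN type with the iXLen placeholder, and a combined file of this shape is normally driven by sed-substituting RUN lines so one test body covers both rv32 and rv64. The RUN lines of the new vsll.ll are not quoted in this mail; the following is a sketch of the usual convention for such merged RVV tests:

; Each RUN line rewrites the iXLen placeholder to the concrete XLEN type
; before piping the file to llc, so the same CHECK lines serve both targets.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s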

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
deleted file mode 100644
index 32cb8d2127aa00..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv64.ll
+++ /dev/null
@@ -1,2800 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vsra.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsra.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i64,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i64,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i64,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i64,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i64,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i64,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vsra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vsra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vsra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vsra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vsra.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsra.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i64 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i64 9,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i64 9,
-    <vscale x 64 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i64 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i64 9,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i64 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsra_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vsra.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vsra.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsra.ll
similarity index 84%
rename from llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsra.ll
index 4897073e9dcbb2..4a8cded04f3cc9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
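 
The first hunk above is the whole merging mechanism: the XLEN-dependent scalar type is spelled iXLen in the shared source, and each RUN line instantiates it with sed before llc ever parses the file, so one test body drives both the riscv32 and riscv64 back ends against a single set of CHECK lines. A minimal sketch of the pattern, reusing the vsra.vv declaration from this file (the function name @sketch is illustrative only, not one of the merged tests):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define <vscale x 1 x i8> @sketch(<vscale x 1 x i8> %v, <vscale x 1 x i8> %amt, iXLen %vl) nounwind {
; CHECK-LABEL: sketch:
; CHECK:    vsra.vv v8, v8, v9
  %r = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef, <vscale x 1 x i8> %v, <vscale x 1 x i8> %amt, iXLen %vl)
  ret <vscale x 1 x i8> %r
}

After substitution the two pipelines differ only in i32 versus i64, so identical FileCheck output shows the generated vector code is XLEN-independent, which is exactly what lets the old vsra-rv32.ll and vsra-rv64.ll collapse into one file.
 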
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1042,10 +1044,10 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsra_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1055,8 +1057,8 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1064,12 +1066,12 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsra_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1079,9 +1081,9 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
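 
In the vector-scalar (vx) intrinsics shown above, iXLen pulls double duty: it types both the GPR shift amount and the vl operand, so the same sed substitution keeps the scalar shift at the target's native register width. Roughly, the two pipelines see (abbreviated declarations, for illustration only):

  rv32: declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i32, i32)
  rv64: declare <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i64, i64)

For the unmasked form the shift amount arrives in a0 and vl in a1 on both targets, and the emitted vsra.vx is byte-for-byte identical, which is why no target-specific CHECK prefix is needed.
 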
@@ -1089,10 +1091,10 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsra_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1102,8 +1104,8 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1111,12 +1113,12 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsra_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1126,9 +1128,9 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1136,10 +1138,10 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsra_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1149,8 +1151,8 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1158,12 +1160,12 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsra_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1173,9 +1175,9 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1183,10 +1185,10 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsra_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1196,8 +1198,8 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1205,12 +1207,12 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsra_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1220,9 +1222,9 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1230,10 +1232,10 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsra_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1243,8 +1245,8 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1252,12 +1254,12 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i32,
+  iXLen,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsra_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1267,9 +1269,9 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1277,10 +1279,10 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsra_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1290,8 +1292,8 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1299,12 +1301,12 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i32,
+  iXLen,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsra_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1314,9 +1316,9 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1324,10 +1326,10 @@ entry:
 declare <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsra_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1337,8 +1339,8 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1346,12 +1348,12 @@ entry:
 declare <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i32,
+  iXLen,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsra_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1361,9 +1363,9 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1371,10 +1373,10 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsra_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1384,8 +1386,8 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1393,12 +1395,12 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsra_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1408,9 +1410,9 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1418,10 +1420,10 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsra_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1431,8 +1433,8 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1440,12 +1442,12 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsra_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1455,9 +1457,9 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1465,10 +1467,10 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsra_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1478,8 +1480,8 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1487,12 +1489,12 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsra_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1502,9 +1504,9 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1512,10 +1514,10 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsra_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1525,8 +1527,8 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1534,12 +1536,12 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsra_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1549,9 +1551,9 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1559,10 +1561,10 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsra_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1572,8 +1574,8 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1581,12 +1583,12 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i32,
+  iXLen,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsra_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1596,9 +1598,9 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1606,10 +1608,10 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsra_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1619,8 +1621,8 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1628,12 +1630,12 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i32,
+  iXLen,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsra_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1643,9 +1645,9 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1653,10 +1655,10 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsra_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1666,8 +1668,8 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1675,12 +1677,12 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsra_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1690,9 +1692,9 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1700,10 +1702,10 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsra_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1713,8 +1715,8 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1722,12 +1724,12 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsra_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1737,9 +1739,9 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1747,10 +1749,10 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsra_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1760,8 +1762,8 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1769,12 +1771,12 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsra_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1784,9 +1786,9 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1794,10 +1796,10 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsra_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1807,8 +1809,8 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1816,12 +1818,12 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsra_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1831,9 +1833,9 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1841,10 +1843,10 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsra_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1854,8 +1856,8 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1863,12 +1865,12 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i32,
+  iXLen,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsra_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1878,9 +1880,9 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1888,10 +1890,10 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsra_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1901,8 +1903,8 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1910,12 +1912,12 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsra_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1925,9 +1927,9 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1935,10 +1937,10 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsra_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1948,8 +1950,8 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1957,12 +1959,12 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsra_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1972,9 +1974,9 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1982,10 +1984,10 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsra_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1995,8 +1997,8 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2004,12 +2006,12 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsra_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2019,9 +2021,9 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2029,10 +2031,10 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsra_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -2042,8 +2044,8 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2051,12 +2053,12 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsra_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2066,14 +2068,14 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsra_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -2083,13 +2085,13 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.nxv1i8(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -2099,14 +2101,14 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsra.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsra_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -2116,13 +2118,13 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.nxv2i8(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -2132,14 +2134,14 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsra.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsra_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -2149,13 +2151,13 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.nxv4i8(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -2165,14 +2167,14 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsra.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsra_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -2182,13 +2184,13 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.nxv8i8(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -2198,14 +2200,14 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsra.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsra_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -2215,13 +2217,13 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.nxv16i8(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -2231,14 +2233,14 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsra.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsra_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2248,13 +2250,13 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.nxv32i8(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -2264,14 +2266,14 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsra.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsra_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -2281,13 +2283,13 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.nxv64i8(
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -2297,14 +2299,14 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsra.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 64 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsra_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2314,13 +2316,13 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.nxv1i16(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -2330,14 +2332,14 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsra.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsra_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2347,13 +2349,13 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.nxv2i16(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -2363,14 +2365,14 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsra.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsra_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2380,13 +2382,13 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.nxv4i16(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -2396,14 +2398,14 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsra.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsra_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2413,13 +2415,13 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.nxv8i16(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -2429,14 +2431,14 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsra.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsra_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2446,13 +2448,13 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.nxv16i16(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -2462,14 +2464,14 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsra.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsra_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -2479,13 +2481,13 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.nxv32i16(
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -2495,14 +2497,14 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsra.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsra_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2512,13 +2514,13 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.nxv1i32(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -2528,14 +2530,14 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsra.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsra_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2545,13 +2547,13 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.nxv2i32(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -2561,14 +2563,14 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsra.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsra_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2578,13 +2580,13 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.nxv4i32(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -2594,14 +2596,14 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsra.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsra_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2611,13 +2613,13 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.nxv8i32(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -2627,14 +2629,14 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsra.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsra_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -2644,13 +2646,13 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.nxv16i32(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -2660,14 +2662,14 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsra.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsra_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2677,13 +2679,13 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.nxv1i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -2693,14 +2695,14 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsra.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsra_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2710,13 +2712,13 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.nxv2i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -2726,14 +2728,14 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsra.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsra_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2743,13 +2745,13 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.nxv4i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -2759,14 +2761,14 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsra.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsra_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -2776,13 +2778,13 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.nxv8i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsra_mask_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -2792,9 +2794,9 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsra.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }

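(Context, not part of the diff above: the i32 -> iXLen substitutions only make
sense together with the RUN lines of the merged file. A minimal sketch of the
usual convention for these merged rvv tests follows -- the exact RUN lines in
vsra.ll may differ, so treat this as illustrative, not as the committed text.)

; NOTE: sketch of the merged-test RUN-line convention, not copied from the commit.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s

Each RUN line textually rewrites iXLen to the target's native integer width
before llc ever parses the IR, so one test file exercises both rv32 and rv64
and the separate -rv32/-rv64 copies can be deleted.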
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
deleted file mode 100644
index 6cb14dbb0de433..00000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv32.ll
+++ /dev/null
@@ -1,2800 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re16.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re32.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v10, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v12, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vsrl.vv v8, v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vl8re64.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsrl.vv v8, v16, v24, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  i32,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  i32,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i32 %2,
-    <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  i32,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i32 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vsrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vsrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vsrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vsrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT:    vsrl.vx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    i32 9,
-    <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    i32 9,
-    <vscale x 64 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    i32 9,
-    <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vsrl.vi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
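
For context on the RUN lines in the merged file below: the test is piped through sed to rewrite an iXLen placeholder to i32 or i64 before llc parses it, so one file now exercises both riscv32 and riscv64. A minimal sketch of the pattern follows, assuming a hypothetical standalone test (the function name @example_vsrl and its CHECK lines are illustrative, not taken from this commit):

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

; iXLen is not a real LLVM type: sed rewrites it to the target's XLEN
; integer type before llc ever sees the file, so the IR that reaches the
; compiler is ordinary i32 (rv32) or i64 (rv64) IR.
declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLen);

; Illustrative test, not from the commit. The vector operands land in
; v8/v9 and the VL operand in a0, matching the tests in the diff, so the
; same CHECK lines hold for both triples.
define <vscale x 1 x i8> @example_vsrl(<vscale x 1 x i8> %v, <vscale x 1 x i8> %amt, iXLen %vl) nounwind {
; CHECK-LABEL: example_vsrl:
; CHECK:       vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:  vsrl.vv v8, v8, v9
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %v,
    <vscale x 1 x i8> %amt,
    iXLen %vl)
  ret <vscale x 1 x i8> %a
}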

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl.ll
similarity index 84%
rename from llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsrl.ll
index 1d4dea0aeff66b..b66fec5c7bb0f5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -159,7 +161,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -169,10 +171,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -184,7 +186,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -193,9 +195,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -206,7 +208,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -216,10 +218,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -231,7 +233,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -240,9 +242,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -253,7 +255,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -263,10 +265,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -278,7 +280,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -287,9 +289,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -300,7 +302,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -310,10 +312,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
@@ -326,7 +328,7 @@ entry:
     <vscale x 64 x i8> %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -348,7 +350,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -358,10 +360,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -373,7 +375,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -382,9 +384,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -395,7 +397,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -405,10 +407,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -420,7 +422,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -429,9 +431,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -442,7 +444,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -452,10 +454,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -467,7 +469,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -476,9 +478,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -489,7 +491,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -499,10 +501,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -514,7 +516,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -523,9 +525,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -536,7 +538,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -546,10 +548,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -561,7 +563,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -570,9 +572,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -583,7 +585,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -593,10 +595,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re16.v v24, (a0)
@@ -609,7 +611,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -618,9 +620,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -631,7 +633,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -641,10 +643,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -678,7 +680,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -688,10 +690,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -703,7 +705,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -712,9 +714,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -725,7 +727,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -735,10 +737,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -750,7 +752,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -759,9 +761,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -772,7 +774,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -782,10 +784,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -797,7 +799,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -806,9 +808,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -819,7 +821,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -829,10 +831,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re32.v v24, (a0)
@@ -845,7 +847,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -854,9 +856,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -867,7 +869,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -877,10 +879,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv1i64_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -901,9 +903,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -914,7 +916,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -924,10 +926,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv2i64_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -939,7 +941,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -948,9 +950,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -961,7 +963,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -971,10 +973,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv4i64_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -986,7 +988,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -995,9 +997,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1018,10 +1020,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vv_nxv8i64_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8re64.v v24, (a0)
@@ -1034,7 +1036,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i64> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -1042,10 +1044,10 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsrl_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -1055,8 +1057,8 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1064,12 +1066,12 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1079,9 +1081,9 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1089,10 +1091,10 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsrl_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -1102,8 +1104,8 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1111,12 +1113,12 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1126,9 +1128,9 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1136,10 +1138,10 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsrl_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1149,8 +1151,8 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1158,12 +1160,12 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1173,9 +1175,9 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1183,10 +1185,10 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsrl_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1196,8 +1198,8 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1205,12 +1207,12 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1220,9 +1222,9 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1230,10 +1232,10 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsrl_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1243,8 +1245,8 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1252,12 +1254,12 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1267,9 +1269,9 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1277,10 +1279,10 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsrl_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -1290,8 +1292,8 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1299,12 +1301,12 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
-  i64,
+  iXLen,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1314,9 +1316,9 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1324,10 +1326,10 @@ entry:
 declare <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsrl_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -1337,8 +1339,8 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1346,12 +1348,12 @@ entry:
 declare <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
-  i64,
+  iXLen,
   <vscale x 64 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1361,9 +1363,9 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1371,10 +1373,10 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsrl_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1384,8 +1386,8 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1393,12 +1395,12 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1408,9 +1410,9 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1418,10 +1420,10 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsrl_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1431,8 +1433,8 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1440,12 +1442,12 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1455,9 +1457,9 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1465,10 +1467,10 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsrl_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1478,8 +1480,8 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1487,12 +1489,12 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1502,9 +1504,9 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1512,10 +1514,10 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsrl_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1525,8 +1527,8 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1534,12 +1536,12 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1549,9 +1551,9 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1559,10 +1561,10 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsrl_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1572,8 +1574,8 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1581,12 +1583,12 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1596,9 +1598,9 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1606,10 +1608,10 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsrl_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1619,8 +1621,8 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1628,12 +1630,12 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
-  i64,
+  iXLen,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1643,9 +1645,9 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1653,10 +1655,10 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsrl_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1666,8 +1668,8 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1675,12 +1677,12 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1690,9 +1692,9 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1700,10 +1702,10 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsrl_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1713,8 +1715,8 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1722,12 +1724,12 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1737,9 +1739,9 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1747,10 +1749,10 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsrl_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1760,8 +1762,8 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1769,12 +1771,12 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1784,9 +1786,9 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1794,10 +1796,10 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsrl_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1807,8 +1809,8 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1816,12 +1818,12 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1831,9 +1833,9 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1841,10 +1843,10 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsrl_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1854,8 +1856,8 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1863,12 +1865,12 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1878,9 +1880,9 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1888,10 +1890,10 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsrl_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -1901,8 +1903,8 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1910,12 +1912,12 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1925,9 +1927,9 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1935,10 +1937,10 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsrl_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -1948,8 +1950,8 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1957,12 +1959,12 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1972,9 +1974,9 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1982,10 +1984,10 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsrl_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -1995,8 +1997,8 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2004,12 +2006,12 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2019,9 +2021,9 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2029,10 +2031,10 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsrl_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -2042,8 +2044,8 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2051,12 +2053,12 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vx_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2066,14 +2068,14 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
@@ -2083,13 +2085,13 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.nxv1i8(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i8_nxv1i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -2099,14 +2101,14 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vsrl.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
@@ -2116,13 +2118,13 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.nxv2i8(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i8_nxv2i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -2132,14 +2134,14 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vsrl.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
@@ -2149,13 +2151,13 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.nxv4i8(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i8_nxv4i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -2165,14 +2167,14 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vsrl.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
@@ -2182,13 +2184,13 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.nxv8i8(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i8_nxv8i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -2198,14 +2200,14 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vsrl.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
@@ -2215,13 +2217,13 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.nxv16i8(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i8_nxv16i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -2231,14 +2233,14 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vsrl.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
@@ -2248,13 +2250,13 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.nxv32i8(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i8_nxv32i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -2264,14 +2266,14 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vsrl.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
@@ -2281,13 +2283,13 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.nxv64i8(
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv64i8_nxv64i8_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
@@ -2297,14 +2299,14 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vsrl.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 64 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
@@ -2314,13 +2316,13 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.nxv1i16(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i16_nxv1i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -2330,14 +2332,14 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsrl.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
@@ -2347,13 +2349,13 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.nxv2i16(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i16_nxv2i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -2363,14 +2365,14 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsrl.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
@@ -2380,13 +2382,13 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.nxv4i16(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i16_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -2396,14 +2398,14 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsrl.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
@@ -2413,13 +2415,13 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.nxv8i16(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i16_nxv8i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -2429,14 +2431,14 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsrl.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
@@ -2446,13 +2448,13 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.nxv16i16(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i16_nxv16i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -2462,14 +2464,14 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsrl.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
@@ -2479,13 +2481,13 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.nxv32i16(
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 32 x i16> @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv32i16_nxv32i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -2495,14 +2497,14 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsrl.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
@@ -2512,13 +2514,13 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.nxv1i32(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i32_nxv1i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -2528,14 +2530,14 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsrl.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
@@ -2545,13 +2547,13 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.nxv2i32(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i32_nxv2i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -2561,14 +2563,14 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsrl.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
@@ -2578,13 +2580,13 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.nxv4i32(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i32_nxv4i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -2594,14 +2596,14 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsrl.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
@@ -2611,13 +2613,13 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.nxv8i32(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i32_nxv8i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -2627,14 +2629,14 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsrl.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
@@ -2644,13 +2646,13 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.nxv16i32(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 16 x i32> @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv16i32_nxv16i32_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -2660,14 +2662,14 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsrl.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
@@ -2677,13 +2679,13 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.nxv1i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 1 x i64> @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv1i64_nxv1i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -2693,14 +2695,14 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsrl.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
@@ -2710,13 +2712,13 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.nxv2i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 2 x i64> @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv2i64_nxv2i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -2726,14 +2728,14 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsrl.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
@@ -2743,13 +2745,13 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.nxv4i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 4 x i64> @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv4i64_nxv4i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -2759,14 +2761,14 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsrl.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
@@ -2776,13 +2778,13 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.nxv8i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
 
-define <vscale x 8 x i64> @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsrl_mask_vi_nxv8i64_nxv8i64_i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -2792,9 +2794,9 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsrl.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
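
For context on the +/- lines above: the merged tests replace the fixed XLEN scalar type (i64 on rv64, i32 on rv32) with the placeholder type iXLen. iXLen is not a real IR type, so the merged file is only valid IR after a per-target substitution in its RUN lines. The lines below are a sketch of that convention as used by other merged rvv intrinsic tests; the exact triples and flags shown are assumptions, not quoted from the new vsrl.ll:

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; sed rewrites the iXLen placeholder to the target's native integer width
; before llc parses the file, so one test covers both riscv32 and riscv64.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

Under this scheme the shared CHECK lines must match output that is identical for both targets, which is why only rv32/rv64 test pairs with matching bodies are candidates for this kind of merge.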
