[llvm] 3d37e78 - [RISCV] Merge more rv32/rv64 vector intrinsic tests that contain the same content.

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sat Jun 25 13:21:57 PDT 2022


Author: Craig Topper
Date: 2022-06-25T13:21:44-07:00
New Revision: 3d37e785c77a2461abe43b82bd0ac247244f94f1

URL: https://github.com/llvm/llvm-project/commit/3d37e785c77a2461abe43b82bd0ac247244f94f1
DIFF: https://github.com/llvm/llvm-project/commit/3d37e785c77a2461abe43b82bd0ac247244f94f1.diff

LOG: [RISCV] Merge more rv32/rv64 vector intrinsic tests that contain the same content.
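
The merged files follow the pattern already used by other combined rvv tests: the XLEN-sized scalar operand is written with an iXLen placeholder, and the RUN lines pipe the test through sed to instantiate it as i32 for riscv32 and i64 for riscv64, so one set of CHECK lines serves both targets (see the vcompress.ll RUN-line hunk below). A minimal sketch of the pattern, taken from the merged vcompress.ll; the exact -mattr list varies per file:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen);

The vloxei/vluxei/vsoxei/vsuxei -rv64.ll files are modified rather than removed, presumably because they keep rv64-only cases that cannot be shared this way.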

Added: 
    llvm/test/CodeGen/RISCV/rvv/vcompress.ll
    llvm/test/CodeGen/RISCV/rvv/vle.ll
    llvm/test/CodeGen/RISCV/rvv/vloxei.ll
    llvm/test/CodeGen/RISCV/rvv/vlse.ll
    llvm/test/CodeGen/RISCV/rvv/vluxei.ll
    llvm/test/CodeGen/RISCV/rvv/vnclip.ll
    llvm/test/CodeGen/RISCV/rvv/vnclipu.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll
    llvm/test/CodeGen/RISCV/rvv/vse.ll
    llvm/test/CodeGen/RISCV/rvv/vsext.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
    llvm/test/CodeGen/RISCV/rvv/vsse.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxei.ll
    llvm/test/CodeGen/RISCV/rvv/vzext.ll

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll

Removed: 
    llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
deleted file mode 100644
index c0ffdac248a4c..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv64.ll
+++ /dev/null
@@ -1,816 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vcompress_vm_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vcompress_vm_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vcompress_vm_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vcompress_vm_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vcompress_vm_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vcompress_vm_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vcompress_vm_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8> %1,
-    <vscale x 64 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vcompress_vm_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vcompress_vm_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vcompress_vm_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vcompress_vm_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vcompress_vm_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vcompress_vm_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vcompress_vm_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vcompress_vm_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vcompress_vm_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vcompress_vm_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vcompress_vm_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vcompress_vm_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vcompress_vm_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vcompress_vm_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vcompress_vm_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vcompress_vm_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vcompress_vm_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vcompress_vm_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vcompress_vm_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vcompress_vm_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>,
-  <vscale x 32 x i1>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vcompress_vm_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half> %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vcompress_vm_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vcompress_vm_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vcompress_vm_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vcompress_vm_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>,
-  <vscale x 16 x i1>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vcompress_vm_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float> %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>,
-  <vscale x 1 x i1>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vcompress_vm_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v9, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double> %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>,
-  <vscale x 2 x i1>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vcompress_vm_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v10, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double> %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>,
-  <vscale x 4 x i1>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vcompress_vm_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v12, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double> %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>,
-  <vscale x 8 x i1>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vcompress_vm_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
-; CHECK-NEXT:    vcompress.vm v8, v16, v0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double> %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll
similarity index 91%
rename from llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vcompress.ll
index f041854e3c7c6..76619733d7e94 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcompress-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcompress.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs | FileCheck %s
 declare <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vcompress_vm_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vcompress_vm_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i8> %a
 }
@@ -27,9 +29,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vcompress_vm_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vcompress_vm_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
@@ -40,7 +42,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i8> %a
 }
@@ -49,9 +51,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vcompress_vm_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vcompress_vm_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
@@ -62,7 +64,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i8> %a
 }
@@ -71,9 +73,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vcompress_vm_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vcompress_vm_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
@@ -84,7 +86,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i8> %a
 }
@@ -93,9 +95,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vcompress_vm_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vcompress_vm_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
@@ -106,7 +108,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i8> %a
 }
@@ -115,9 +117,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vcompress_vm_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vcompress_vm_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
@@ -128,7 +130,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i8> %a
 }
@@ -137,9 +139,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vcompress_vm_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vcompress_vm_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
@@ -150,7 +152,7 @@ entry:
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8> %1,
     <vscale x 64 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 64 x i8> %a
 }
@@ -159,9 +161,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vcompress_vm_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vcompress_vm_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -172,7 +174,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i16> %a
 }
@@ -181,9 +183,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vcompress_vm_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vcompress_vm_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -194,7 +196,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i16> %a
 }
@@ -203,9 +205,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vcompress_vm_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vcompress_vm_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -216,7 +218,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i16> %a
 }
@@ -225,9 +227,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vcompress_vm_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vcompress_vm_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -238,7 +240,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i16> %a
 }
@@ -247,9 +249,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vcompress_vm_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vcompress_vm_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -260,7 +262,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i16> %a
 }
@@ -269,9 +271,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vcompress_vm_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vcompress_vm_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -282,7 +284,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x i16> %a
 }
@@ -291,9 +293,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vcompress_vm_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vcompress_vm_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -304,7 +306,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i32> %a
 }
@@ -313,9 +315,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vcompress_vm_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vcompress_vm_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -326,7 +328,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i32> %a
 }
@@ -335,9 +337,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vcompress_vm_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vcompress_vm_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -348,7 +350,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i32> %a
 }
@@ -357,9 +359,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vcompress_vm_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vcompress_vm_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -370,7 +372,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i32> %a
 }
@@ -379,9 +381,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vcompress_vm_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vcompress_vm_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -392,7 +394,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x i32> %a
 }
@@ -401,9 +403,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vcompress_vm_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vcompress_vm_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -414,7 +416,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x i64> %a
 }
@@ -423,9 +425,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vcompress_vm_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vcompress_vm_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -436,7 +438,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x i64> %a
 }
@@ -445,9 +447,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vcompress_vm_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vcompress_vm_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -458,7 +460,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x i64> %a
 }
@@ -467,9 +469,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vcompress_vm_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vcompress_vm_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -480,7 +482,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x i64> %a
 }
@@ -489,9 +491,9 @@ declare <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vcompress_vm_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vcompress_vm_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x half> %a
 }
@@ -511,9 +513,9 @@ declare <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vcompress_vm_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vcompress_vm_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x half> %a
 }
@@ -533,9 +535,9 @@ declare <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vcompress_vm_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vcompress_vm_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
@@ -546,7 +548,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x half> %a
 }
@@ -555,9 +557,9 @@ declare <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vcompress_vm_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vcompress_vm_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
@@ -568,7 +570,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x half> %a
 }
@@ -577,9 +579,9 @@ declare <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vcompress_vm_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vcompress_vm_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
@@ -590,7 +592,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x half> %a
 }
@@ -599,9 +601,9 @@ declare <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vcompress_vm_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vcompress_vm_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
@@ -612,7 +614,7 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x half> %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 32 x half> %a
 }
@@ -621,9 +623,9 @@ declare <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vcompress_vm_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vcompress_vm_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
@@ -634,7 +636,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x float> %a
 }
@@ -643,9 +645,9 @@ declare <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vcompress_vm_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vcompress_vm_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x float> %a
 }
@@ -665,9 +667,9 @@ declare <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vcompress_vm_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vcompress_vm_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
@@ -678,7 +680,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x float> %a
 }
@@ -687,9 +689,9 @@ declare <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vcompress_vm_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vcompress_vm_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
@@ -700,7 +702,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x float> %a
 }
@@ -709,9 +711,9 @@ declare <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vcompress_vm_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vcompress_vm_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
@@ -722,7 +724,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x float> %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 16 x float> %a
 }
@@ -731,9 +733,9 @@ declare <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vcompress_vm_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vcompress_vm_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
@@ -744,7 +746,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double> %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 1 x double> %a
 }
@@ -753,9 +755,9 @@ declare <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vcompress_vm_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vcompress_vm_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
@@ -766,7 +768,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double> %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 2 x double> %a
 }
@@ -775,9 +777,9 @@ declare <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vcompress_vm_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vcompress_vm_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
@@ -788,7 +790,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double> %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 4 x double> %a
 }
@@ -797,9 +799,9 @@ declare <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vcompress_vm_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vcompress_vm_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vcompress_vm_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
@@ -810,7 +812,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x double> %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
deleted file mode 100644
index b78e288b39fa1..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vle-rv64.ll
+++ /dev/null
@@ -1,1594 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh,+experimental-zvfh \
-; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i64 %1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
-    <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
-    i64 %1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
-    <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
-    i64 %1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
-    <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
-    i64 %1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vle64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
-    <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
-    i64 %1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
-    <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
-    i64 %1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
-    <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
-    i64 %1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
-    <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
-    i64 %1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
-    <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
-    i64 %1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vle32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
-    i64 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
-    <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
-    i64 %1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
-    <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
-    i64 %1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
-    <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
-    i64 %1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
-    <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
-    i64 %1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
-    <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
-    i64 %1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
-    <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
-    i64 %1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vle16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
-    i64 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
-    i64 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
-    i64 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
-    i64 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
-    i64 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8>* %0,
-    i64 %1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vle.ll
similarity index 88%
rename from llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vle.ll
index 3c1a30060ed69..0d8953420ec7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vle-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vle.ll
@@ -1,13 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -mattr=+zfh,+experimental-zvfh \
-; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vle_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -17,7 +18,7 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -26,10 +27,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vle_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -40,7 +41,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -48,9 +49,9 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vle_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -60,7 +61,7 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vle.nxv2i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -69,10 +70,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vle.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vle_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -83,7 +84,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -91,9 +92,9 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vle_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -103,7 +104,7 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vle.nxv4i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -112,10 +113,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vle.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vle_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -126,7 +127,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -134,9 +135,9 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vle_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -146,7 +147,7 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vle.nxv8i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -155,10 +156,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vle.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vle_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -169,7 +170,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -177,9 +178,9 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i32 %1) nounwind {
+define <vscale x 1 x double> @intrinsic_vle_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -189,7 +190,7 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vle.nxv1f64(
     <vscale x 1 x double> undef,
     <vscale x 1 x double>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x double> %a
 }
@@ -198,10 +199,10 @@ declare <vscale x 1 x double> @llvm.riscv.vle.mask.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x double> @intrinsic_vle_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -212,7 +213,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -220,9 +221,9 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i32 %1) nounwind {
+define <vscale x 2 x double> @intrinsic_vle_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -232,7 +233,7 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vle.nxv2f64(
     <vscale x 2 x double> undef,
     <vscale x 2 x double>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x double> %a
 }
@@ -241,10 +242,10 @@ declare <vscale x 2 x double> @llvm.riscv.vle.mask.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x double> @intrinsic_vle_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -255,7 +256,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -263,9 +264,9 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i32 %1) nounwind {
+define <vscale x 4 x double> @intrinsic_vle_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -275,7 +276,7 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vle.nxv4f64(
     <vscale x 4 x double> undef,
     <vscale x 4 x double>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x double> %a
 }
@@ -284,10 +285,10 @@ declare <vscale x 4 x double> @llvm.riscv.vle.mask.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x double> @intrinsic_vle_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -298,7 +299,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -306,9 +307,9 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i32 %1) nounwind {
+define <vscale x 8 x double> @intrinsic_vle_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -318,7 +319,7 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vle.nxv8f64(
     <vscale x 8 x double> undef,
     <vscale x 8 x double>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x double> %a
 }
@@ -327,10 +328,10 @@ declare <vscale x 8 x double> @llvm.riscv.vle.mask.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x double> @intrinsic_vle_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -341,7 +342,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -349,9 +350,9 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vle_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -361,7 +362,7 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -370,10 +371,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vle.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vle_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -384,7 +385,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -392,9 +393,9 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vle_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -404,7 +405,7 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vle.nxv2i32(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -413,10 +414,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vle.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vle_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -427,7 +428,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -435,9 +436,9 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vle_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -447,7 +448,7 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vle.nxv4i32(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -456,10 +457,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vle.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vle_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -470,7 +471,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -478,9 +479,9 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vle_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -490,7 +491,7 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vle.nxv8i32(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -499,10 +500,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vle.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vle_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -513,7 +514,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -521,9 +522,9 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vle_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -533,7 +534,7 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vle.nxv16i32(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -542,10 +543,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vle.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vle_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -556,7 +557,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -564,9 +565,9 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i32 %1) nounwind {
+define <vscale x 1 x float> @intrinsic_vle_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -576,7 +577,7 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vle.nxv1f32(
     <vscale x 1 x float> undef,
     <vscale x 1 x float>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x float> %a
 }
@@ -585,10 +586,10 @@ declare <vscale x 1 x float> @llvm.riscv.vle.mask.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x float> @intrinsic_vle_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -599,7 +600,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -607,9 +608,9 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i32 %1) nounwind {
+define <vscale x 2 x float> @intrinsic_vle_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -619,7 +620,7 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vle.nxv2f32(
     <vscale x 2 x float> undef,
     <vscale x 2 x float>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x float> %a
 }
@@ -628,10 +629,10 @@ declare <vscale x 2 x float> @llvm.riscv.vle.mask.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x float> @intrinsic_vle_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -642,7 +643,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -650,9 +651,9 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i32 %1) nounwind {
+define <vscale x 4 x float> @intrinsic_vle_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -662,7 +663,7 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vle.nxv4f32(
     <vscale x 4 x float> undef,
     <vscale x 4 x float>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x float> %a
 }
@@ -671,10 +672,10 @@ declare <vscale x 4 x float> @llvm.riscv.vle.mask.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x float> @intrinsic_vle_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -685,7 +686,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -693,9 +694,9 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i32 %1) nounwind {
+define <vscale x 8 x float> @intrinsic_vle_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -705,7 +706,7 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vle.nxv8f32(
     <vscale x 8 x float> undef,
     <vscale x 8 x float>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x float> %a
 }
@@ -714,10 +715,10 @@ declare <vscale x 8 x float> @llvm.riscv.vle.mask.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x float> @intrinsic_vle_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -728,7 +729,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -736,9 +737,9 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i32 %1) nounwind {
+define <vscale x 16 x float> @intrinsic_vle_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -748,7 +749,7 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32(
     <vscale x 16 x float> undef,
     <vscale x 16 x float>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x float> %a
 }
@@ -757,10 +758,10 @@ declare <vscale x 16 x float> @llvm.riscv.vle.mask.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x float> @intrinsic_vle_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -771,7 +772,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -779,9 +780,9 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vle_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -791,7 +792,7 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vle.nxv1i16(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -800,10 +801,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vle.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vle_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -814,7 +815,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -822,9 +823,9 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vle_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -834,7 +835,7 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vle.nxv2i16(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -843,10 +844,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vle.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vle_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -857,7 +858,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -865,9 +866,9 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vle_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -877,7 +878,7 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vle.nxv4i16(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -886,10 +887,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vle.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vle_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -900,7 +901,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -908,9 +909,9 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vle_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -920,7 +921,7 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vle.nxv8i16(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -929,10 +930,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vle.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vle_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -943,7 +944,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -951,9 +952,9 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vle_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -963,7 +964,7 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vle.nxv16i16(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -972,10 +973,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vle.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vle_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -986,7 +987,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -994,9 +995,9 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vle_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1006,7 +1007,7 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vle.nxv32i16(
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1015,10 +1016,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vle.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vle_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1029,7 +1030,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1037,9 +1038,9 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, i32 %1) nounwind {
+define <vscale x 1 x half> @intrinsic_vle_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1049,7 +1050,7 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vle.nxv1f16(
     <vscale x 1 x half> undef,
     <vscale x 1 x half>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1058,10 +1059,10 @@ declare <vscale x 1 x half> @llvm.riscv.vle.mask.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x half> @intrinsic_vle_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1072,7 +1073,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1080,9 +1081,9 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, i32 %1) nounwind {
+define <vscale x 2 x half> @intrinsic_vle_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1092,7 +1093,7 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vle.nxv2f16(
     <vscale x 2 x half> undef,
     <vscale x 2 x half>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1101,10 +1102,10 @@ declare <vscale x 2 x half> @llvm.riscv.vle.mask.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x half> @intrinsic_vle_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1115,7 +1116,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1123,9 +1124,9 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, i32 %1) nounwind {
+define <vscale x 4 x half> @intrinsic_vle_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1135,7 +1136,7 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vle.nxv4f16(
     <vscale x 4 x half> undef,
     <vscale x 4 x half>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1144,10 +1145,10 @@ declare <vscale x 4 x half> @llvm.riscv.vle.mask.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x half> @intrinsic_vle_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1158,7 +1159,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1166,9 +1167,9 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, i32 %1) nounwind {
+define <vscale x 8 x half> @intrinsic_vle_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1178,7 +1179,7 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vle.nxv8f16(
     <vscale x 8 x half> undef,
     <vscale x 8 x half>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1187,10 +1188,10 @@ declare <vscale x 8 x half> @llvm.riscv.vle.mask.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x half> @intrinsic_vle_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1201,7 +1202,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1209,9 +1210,9 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, i32 %1) nounwind {
+define <vscale x 16 x half> @intrinsic_vle_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1221,7 +1222,7 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vle.nxv16f16(
     <vscale x 16 x half> undef,
     <vscale x 16 x half>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1230,10 +1231,10 @@ declare <vscale x 16 x half> @llvm.riscv.vle.mask.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x half> @intrinsic_vle_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1244,7 +1245,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1252,9 +1253,9 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, i32 %1) nounwind {
+define <vscale x 32 x half> @intrinsic_vle_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1264,7 +1265,7 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vle.nxv32f16(
     <vscale x 32 x half> undef,
     <vscale x 32 x half>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x half> %a
 }
@@ -1273,10 +1274,10 @@ declare <vscale x 32 x half> @llvm.riscv.vle.mask.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x half> @intrinsic_vle_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1287,7 +1288,7 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x half>* %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -1295,9 +1296,9 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vle_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1307,7 +1308,7 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vle.nxv1i8(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1316,10 +1317,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vle.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vle_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1330,7 +1331,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1338,9 +1339,9 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vle_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1350,7 +1351,7 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vle.nxv2i8(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1359,10 +1360,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vle.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vle_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1373,7 +1374,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1381,9 +1382,9 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vle_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1393,7 +1394,7 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vle.nxv4i8(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1402,10 +1403,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vle.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vle_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1416,7 +1417,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1424,9 +1425,9 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vle_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1436,7 +1437,7 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vle.nxv8i8(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1445,10 +1446,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vle.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vle_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1459,7 +1460,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1467,9 +1468,9 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vle_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1479,7 +1480,7 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vle.nxv16i8(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1488,10 +1489,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vle.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vle_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1502,7 +1503,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1510,9 +1511,9 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vle_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1522,7 +1523,7 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vle.nxv32i8(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1531,10 +1532,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vle.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vle_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1545,7 +1546,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1553,9 +1554,9 @@ entry:
 declare <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i32 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vle_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vle_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1565,7 +1566,7 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8(
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8>* %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1574,10 +1575,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vle.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vle_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vle_mask_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1588,7 +1589,7 @@ entry:
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8>* %1,
     <vscale x 64 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
index 2b328be8a6daf..188d342a0352c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv64.ll
@@ -1,6 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh,+f,+d -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
+
+; The intrinsics are not supported with RV32.
+
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -1336,5062 +1339,3 @@ entry:
 
   ret <vscale x 8 x double> %a
 }
-
-declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
-    <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
-    <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
-    <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
-    <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
-    <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
-    <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
-    <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
-    <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
-    <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
-    <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
-    <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
-    <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
-    <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
-    <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
-    <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
-    <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
-    <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
-    <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
-    <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
-    <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
-    <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
-    <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
-    <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
-    <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
-    <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
-    <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
-    <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8>* %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
-    <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
-    <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
-    <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
-    <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
-    <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
-    <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
-    <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
-    <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
-    <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
-    <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
-    <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
-    <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
-    <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
-    <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vloxei.ll
index f0942e54efbd4..23619f0d365d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh,+f,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -20,7 +22,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -30,10 +32,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i32(
   <vscale x 1 x i8>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -45,7 +47,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -54,9 +56,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -68,7 +70,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -78,10 +80,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i32(
   <vscale x 2 x i8>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -93,7 +95,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -102,9 +104,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -116,7 +118,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -126,10 +128,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i32(
   <vscale x 4 x i8>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -141,7 +143,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -150,9 +152,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -164,7 +166,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -174,10 +176,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i32(
   <vscale x 8 x i8>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -189,7 +191,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -198,9 +200,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -212,7 +214,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8>* %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -222,10 +224,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i32(
   <vscale x 16 x i8>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -237,7 +239,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -246,9 +248,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -260,7 +262,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -270,10 +272,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -285,7 +287,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -294,9 +296,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -308,7 +310,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -333,7 +335,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -342,9 +344,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -356,7 +358,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -366,10 +368,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -381,7 +383,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -404,7 +406,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -414,10 +416,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -429,7 +431,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -438,9 +440,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -452,7 +454,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16>* %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -462,10 +464,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -477,7 +479,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -486,9 +488,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -499,7 +501,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -509,10 +511,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -533,9 +535,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -546,7 +548,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -556,10 +558,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -571,7 +573,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -593,7 +595,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -603,10 +605,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -618,7 +620,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -627,9 +629,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -650,10 +652,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -665,7 +667,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -674,9 +676,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -687,7 +689,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32>* %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -697,10 +699,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -712,7 +714,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -721,9 +723,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -735,7 +737,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -745,10 +747,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -760,7 +762,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -769,9 +771,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -783,7 +785,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -793,10 +795,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -808,7 +810,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -817,9 +819,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -831,7 +833,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -841,10 +843,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -856,7 +858,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -865,9 +867,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -879,7 +881,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -889,10 +891,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -904,7 +906,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -913,9 +915,9 @@ declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -927,7 +929,7 @@ entry:
     <vscale x 1 x half> undef,
     <vscale x 1 x half>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -937,10 +939,10 @@ declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -952,7 +954,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -961,9 +963,9 @@ declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -975,7 +977,7 @@ entry:
     <vscale x 2 x half> undef,
     <vscale x 2 x half>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -985,10 +987,10 @@ declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1000,7 +1002,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1009,9 +1011,9 @@ declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1023,7 +1025,7 @@ entry:
     <vscale x 4 x half> undef,
     <vscale x 4 x half>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -1033,10 +1035,10 @@ declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i32(
   <vscale x 4 x half>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1048,7 +1050,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1057,9 +1059,9 @@ declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1071,7 +1073,7 @@ entry:
     <vscale x 8 x half> undef,
     <vscale x 8 x half>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -1081,10 +1083,10 @@ declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i32(
   <vscale x 8 x half>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1096,7 +1098,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1105,9 +1107,9 @@ declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1119,7 +1121,7 @@ entry:
     <vscale x 16 x half> undef,
     <vscale x 16 x half>* %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -1129,10 +1131,10 @@ declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i32(
   <vscale x 16 x half>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1144,7 +1146,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1153,9 +1155,9 @@ declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1166,7 +1168,7 @@ entry:
     <vscale x 1 x float> undef,
     <vscale x 1 x float>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -1176,10 +1178,10 @@ declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i32(
   <vscale x 1 x float>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1191,7 +1193,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1200,9 +1202,9 @@ declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1213,7 +1215,7 @@ entry:
     <vscale x 2 x float> undef,
     <vscale x 2 x float>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1223,10 +1225,10 @@ declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i32(
   <vscale x 2 x float>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1238,7 +1240,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1247,9 +1249,9 @@ declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1260,7 +1262,7 @@ entry:
     <vscale x 4 x float> undef,
     <vscale x 4 x float>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1270,10 +1272,10 @@ declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i32(
   <vscale x 4 x float>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1285,7 +1287,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1294,9 +1296,9 @@ declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1307,7 +1309,7 @@ entry:
     <vscale x 8 x float> undef,
     <vscale x 8 x float>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1317,10 +1319,10 @@ declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i32(
   <vscale x 8 x float>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1332,7 +1334,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1341,9 +1343,9 @@ declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1354,7 +1356,7 @@ entry:
     <vscale x 16 x float> undef,
     <vscale x 16 x float>* %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1364,10 +1366,10 @@ declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i32(
   <vscale x 16 x float>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1379,7 +1381,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1388,9 +1390,9 @@ declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1402,7 +1404,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1412,10 +1414,10 @@ declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i32(
   <vscale x 1 x double>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1427,7 +1429,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1436,9 +1438,9 @@ declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1450,7 +1452,7 @@ entry:
     <vscale x 2 x double> undef,
     <vscale x 2 x double>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1460,10 +1462,10 @@ declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i32(
   <vscale x 2 x double>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1475,7 +1477,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1484,9 +1486,9 @@ declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1498,7 +1500,7 @@ entry:
     <vscale x 4 x double> undef,
     <vscale x 4 x double>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1508,10 +1510,10 @@ declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i32(
   <vscale x 4 x double>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1523,7 +1525,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1532,9 +1534,9 @@ declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1546,7 +1548,7 @@ entry:
     <vscale x 8 x double> undef,
     <vscale x 8 x double>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1556,10 +1558,10 @@ declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i32(
   <vscale x 8 x double>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1571,7 +1573,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -1580,9 +1582,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1594,7 +1596,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1604,10 +1606,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1619,7 +1621,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1628,9 +1630,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1642,7 +1644,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1652,10 +1654,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1676,9 +1678,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1690,7 +1692,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1700,10 +1702,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1715,7 +1717,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1724,9 +1726,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1738,7 +1740,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1748,10 +1750,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1763,7 +1765,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1772,9 +1774,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8>* %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1796,10 +1798,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1811,7 +1813,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1820,9 +1822,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1834,7 +1836,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8>* %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1844,10 +1846,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1859,7 +1861,7 @@ entry:
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1868,9 +1870,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1881,7 +1883,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1891,10 +1893,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1906,7 +1908,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1915,9 +1917,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1928,7 +1930,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1938,10 +1940,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1953,7 +1955,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1962,9 +1964,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1975,7 +1977,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1985,10 +1987,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2000,7 +2002,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -2009,9 +2011,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2022,7 +2024,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -2032,10 +2034,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2047,7 +2049,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -2056,9 +2058,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2069,7 +2071,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16>* %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -2079,10 +2081,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2094,7 +2096,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -2103,9 +2105,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2116,7 +2118,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16>* %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -2126,10 +2128,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2141,7 +2143,7 @@ entry:
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -2150,9 +2152,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2164,7 +2166,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -2174,10 +2176,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2189,7 +2191,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -2198,9 +2200,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2212,7 +2214,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -2222,10 +2224,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2237,7 +2239,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -2246,9 +2248,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2260,7 +2262,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -2270,10 +2272,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2285,7 +2287,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -2294,9 +2296,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2308,7 +2310,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -2318,10 +2320,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2333,7 +2335,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -2342,9 +2344,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2356,7 +2358,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32>* %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -2366,10 +2368,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2381,7 +2383,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -2390,9 +2392,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2404,7 +2406,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -2414,10 +2416,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i16(
   <vscale x 1 x i64>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2429,7 +2431,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -2438,9 +2440,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -2452,7 +2454,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -2462,10 +2464,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i16(
   <vscale x 2 x i64>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -2477,7 +2479,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -2486,9 +2488,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2500,7 +2502,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2510,10 +2512,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i16(
   <vscale x 4 x i64>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2525,7 +2527,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2534,9 +2536,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2548,7 +2550,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2558,10 +2560,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i16(
   <vscale x 8 x i64>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2573,7 +2575,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2582,9 +2584,9 @@ declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -2595,7 +2597,7 @@ entry:
     <vscale x 1 x half> undef,
     <vscale x 1 x half>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -2605,10 +2607,10 @@ declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i16(
   <vscale x 1 x half>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -2620,7 +2622,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -2629,9 +2631,9 @@ declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -2642,7 +2644,7 @@ entry:
     <vscale x 2 x half> undef,
     <vscale x 2 x half>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -2652,10 +2654,10 @@ declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i16(
   <vscale x 2 x half>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -2667,7 +2669,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -2676,9 +2678,9 @@ declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2689,7 +2691,7 @@ entry:
     <vscale x 4 x half> undef,
     <vscale x 4 x half>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -2699,10 +2701,10 @@ declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i16(
   <vscale x 4 x half>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2714,7 +2716,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -2723,9 +2725,9 @@ declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2736,7 +2738,7 @@ entry:
     <vscale x 8 x half> undef,
     <vscale x 8 x half>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -2746,10 +2748,10 @@ declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i16(
   <vscale x 8 x half>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2761,7 +2763,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -2770,9 +2772,9 @@ declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2783,7 +2785,7 @@ entry:
     <vscale x 16 x half> undef,
     <vscale x 16 x half>* %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -2793,10 +2795,10 @@ declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i16(
   <vscale x 16 x half>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2808,7 +2810,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -2817,9 +2819,9 @@ declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2830,7 +2832,7 @@ entry:
     <vscale x 32 x half> undef,
     <vscale x 32 x half>* %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -2840,10 +2842,10 @@ declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i16(
   <vscale x 32 x half>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2855,7 +2857,7 @@ entry:
     <vscale x 32 x half>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -2864,9 +2866,9 @@ declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2878,7 +2880,7 @@ entry:
     <vscale x 1 x float> undef,
     <vscale x 1 x float>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -2888,10 +2890,10 @@ declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i16(
   <vscale x 1 x float>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2903,7 +2905,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -2912,9 +2914,9 @@ declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2926,7 +2928,7 @@ entry:
     <vscale x 2 x float> undef,
     <vscale x 2 x float>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -2936,10 +2938,10 @@ declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i16(
   <vscale x 2 x float>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2951,7 +2953,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -2960,9 +2962,9 @@ declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2974,7 +2976,7 @@ entry:
     <vscale x 4 x float> undef,
     <vscale x 4 x float>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -2984,10 +2986,10 @@ declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i16(
   <vscale x 4 x float>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2999,7 +3001,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -3008,9 +3010,9 @@ declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -3022,7 +3024,7 @@ entry:
     <vscale x 8 x float> undef,
     <vscale x 8 x float>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -3032,10 +3034,10 @@ declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i16(
   <vscale x 8 x float>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -3047,7 +3049,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -3056,9 +3058,9 @@ declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -3070,7 +3072,7 @@ entry:
     <vscale x 16 x float> undef,
     <vscale x 16 x float>* %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -3080,10 +3082,10 @@ declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i16(
   <vscale x 16 x float>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -3095,7 +3097,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -3104,9 +3106,9 @@ declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -3118,7 +3120,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -3128,10 +3130,10 @@ declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i16(
   <vscale x 1 x double>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -3143,7 +3145,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -3152,9 +3154,9 @@ declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -3166,7 +3168,7 @@ entry:
     <vscale x 2 x double> undef,
     <vscale x 2 x double>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -3176,10 +3178,10 @@ declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i16(
   <vscale x 2 x double>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -3191,7 +3193,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -3200,9 +3202,9 @@ declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -3214,7 +3216,7 @@ entry:
     <vscale x 4 x double> undef,
     <vscale x 4 x double>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -3224,10 +3226,10 @@ declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i16(
   <vscale x 4 x double>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -3239,7 +3241,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -3248,9 +3250,9 @@ declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -3262,7 +3264,7 @@ entry:
     <vscale x 8 x double> undef,
     <vscale x 8 x double>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -3272,10 +3274,10 @@ declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i16(
   <vscale x 8 x double>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -3287,7 +3289,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -3296,9 +3298,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -3309,7 +3311,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -3319,10 +3321,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -3334,7 +3336,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -3343,9 +3345,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -3356,7 +3358,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -3366,10 +3368,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -3381,7 +3383,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -3390,9 +3392,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -3403,7 +3405,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -3413,10 +3415,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -3428,7 +3430,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -3437,9 +3439,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -3450,7 +3452,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -3460,10 +3462,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -3475,7 +3477,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -3484,9 +3486,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vloxei.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -3497,7 +3499,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8>* %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -3507,10 +3509,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vloxei.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -3522,7 +3524,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -3531,9 +3533,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vloxei.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -3544,7 +3546,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8>* %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -3554,10 +3556,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vloxei.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -3569,7 +3571,7 @@ entry:
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -3578,9 +3580,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vloxei.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3591,7 +3593,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8>* %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -3601,10 +3603,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vloxei.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>*,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3616,7 +3618,7 @@ entry:
     <vscale x 64 x i8>* %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -3625,9 +3627,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -3639,7 +3641,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -3649,10 +3651,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -3664,7 +3666,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -3673,9 +3675,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -3687,7 +3689,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -3697,10 +3699,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -3712,7 +3714,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -3721,9 +3723,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -3735,7 +3737,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -3745,10 +3747,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -3760,7 +3762,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -3769,9 +3771,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -3783,7 +3785,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -3793,10 +3795,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -3808,7 +3810,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -3817,9 +3819,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vloxei.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -3831,7 +3833,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16>* %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -3841,10 +3843,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vloxei.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -3856,7 +3858,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -3865,9 +3867,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vloxei.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -3879,7 +3881,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16>* %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -3889,10 +3891,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vloxei.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -3904,7 +3906,7 @@ entry:
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -3913,9 +3915,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -3927,7 +3929,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -3937,10 +3939,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i8(
   <vscale x 1 x i32>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -3952,7 +3954,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -3961,9 +3963,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -3975,7 +3977,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -3985,10 +3987,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i8(
   <vscale x 2 x i32>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4000,7 +4002,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -4009,9 +4011,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4023,7 +4025,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -4033,10 +4035,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i8(
   <vscale x 4 x i32>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4048,7 +4050,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -4057,9 +4059,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4071,7 +4073,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -4081,10 +4083,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i8(
   <vscale x 8 x i32>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4096,7 +4098,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -4105,9 +4107,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vloxei.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4119,7 +4121,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32>* %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -4129,10 +4131,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vloxei.mask.nxv16i32.nxv16i8(
   <vscale x 16 x i32>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4144,7 +4146,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -4153,9 +4155,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4167,7 +4169,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -4177,10 +4179,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i8(
   <vscale x 1 x i64>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4192,7 +4194,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -4201,9 +4203,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4215,7 +4217,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -4225,10 +4227,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i8(
   <vscale x 2 x i64>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4240,7 +4242,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -4249,9 +4251,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4263,7 +4265,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -4273,10 +4275,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i8(
   <vscale x 4 x i64>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4288,7 +4290,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -4297,9 +4299,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4311,7 +4313,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -4321,10 +4323,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i8(
   <vscale x 8 x i64>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4336,7 +4338,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -4345,9 +4347,9 @@ declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -4359,7 +4361,7 @@ entry:
     <vscale x 1 x half> undef,
     <vscale x 1 x half>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -4369,10 +4371,10 @@ declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i8(
   <vscale x 1 x half>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -4384,7 +4386,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -4393,9 +4395,9 @@ declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -4407,7 +4409,7 @@ entry:
     <vscale x 2 x half> undef,
     <vscale x 2 x half>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -4417,10 +4419,10 @@ declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i8(
   <vscale x 2 x half>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -4432,7 +4434,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -4441,9 +4443,9 @@ declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -4455,7 +4457,7 @@ entry:
     <vscale x 4 x half> undef,
     <vscale x 4 x half>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -4465,10 +4467,10 @@ declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i8(
   <vscale x 4 x half>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -4480,7 +4482,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -4489,9 +4491,9 @@ declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -4503,7 +4505,7 @@ entry:
     <vscale x 8 x half> undef,
     <vscale x 8 x half>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -4513,10 +4515,10 @@ declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i8(
   <vscale x 8 x half>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -4528,7 +4530,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -4537,9 +4539,9 @@ declare <vscale x 16 x half> @llvm.riscv.vloxei.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -4551,7 +4553,7 @@ entry:
     <vscale x 16 x half> undef,
     <vscale x 16 x half>* %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -4561,10 +4563,10 @@ declare <vscale x 16 x half> @llvm.riscv.vloxei.mask.nxv16f16.nxv16i8(
   <vscale x 16 x half>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -4576,7 +4578,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -4585,9 +4587,9 @@ declare <vscale x 32 x half> @llvm.riscv.vloxei.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -4599,7 +4601,7 @@ entry:
     <vscale x 32 x half> undef,
     <vscale x 32 x half>* %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -4609,10 +4611,10 @@ declare <vscale x 32 x half> @llvm.riscv.vloxei.mask.nxv32f16.nxv32i8(
   <vscale x 32 x half>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -4624,7 +4626,7 @@ entry:
     <vscale x 32 x half>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -4633,9 +4635,9 @@ declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -4647,7 +4649,7 @@ entry:
     <vscale x 1 x float> undef,
     <vscale x 1 x float>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -4657,10 +4659,10 @@ declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i8(
   <vscale x 1 x float>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -4672,7 +4674,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -4681,9 +4683,9 @@ declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4695,7 +4697,7 @@ entry:
     <vscale x 2 x float> undef,
     <vscale x 2 x float>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -4705,10 +4707,10 @@ declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i8(
   <vscale x 2 x float>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4720,7 +4722,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -4729,9 +4731,9 @@ declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4743,7 +4745,7 @@ entry:
     <vscale x 4 x float> undef,
     <vscale x 4 x float>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -4753,10 +4755,10 @@ declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i8(
   <vscale x 4 x float>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4768,7 +4770,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -4777,9 +4779,9 @@ declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4791,7 +4793,7 @@ entry:
     <vscale x 8 x float> undef,
     <vscale x 8 x float>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -4801,10 +4803,10 @@ declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i8(
   <vscale x 8 x float>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4816,7 +4818,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -4825,9 +4827,9 @@ declare <vscale x 16 x float> @llvm.riscv.vloxei.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4839,7 +4841,7 @@ entry:
     <vscale x 16 x float> undef,
     <vscale x 16 x float>* %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -4849,10 +4851,10 @@ declare <vscale x 16 x float> @llvm.riscv.vloxei.mask.nxv16f32.nxv16i8(
   <vscale x 16 x float>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4864,7 +4866,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -4873,9 +4875,9 @@ declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4887,7 +4889,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -4897,10 +4899,10 @@ declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i8(
   <vscale x 1 x double>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4912,7 +4914,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -4921,9 +4923,9 @@ declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4935,7 +4937,7 @@ entry:
     <vscale x 2 x double> undef,
     <vscale x 2 x double>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -4945,10 +4947,10 @@ declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i8(
   <vscale x 2 x double>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4960,7 +4962,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -4969,9 +4971,9 @@ declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4983,7 +4985,7 @@ entry:
     <vscale x 4 x double> undef,
     <vscale x 4 x double>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -4993,10 +4995,10 @@ declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i8(
   <vscale x 4 x double>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -5008,7 +5010,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -5017,9 +5019,9 @@ declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -5031,7 +5033,7 @@ entry:
     <vscale x 8 x double> undef,
     <vscale x 8 x double>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -5041,10 +5043,10 @@ declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i8(
   <vscale x 8 x double>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -5056,7 +5058,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
deleted file mode 100644
index 4b06849c66e51..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv32.ll
+++ /dev/null
@@ -1,1741 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vlse_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vlse_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vlse_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vlse_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vlse_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vlse_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
-    <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vlse_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
-    <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vlse_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
-    <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vlse_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vlse_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vlse_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vlse_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vlse_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
-    <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vlse_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
-    <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vlse_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
-    <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vlse_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
-    <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vlse_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
-    <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
-; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vlse_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vlse_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vlse_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vlse_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vlse_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  i32,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    i32 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  i32,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vlse_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
-    <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  i32,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vlse_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
-    <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  i32,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vlse_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
-    <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  i32,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vlse_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
-    <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  i32,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vlse_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
-    <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  i32,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vlse_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
-    <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  i32,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
-; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    i32 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  i32,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    i32 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8>* %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  i32,
-  <vscale x 64 x i1>,
-  i32,
-  i32);
-
-define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
-; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    i32 %2,
-    <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 64 x i8> %a
-}

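(Context for the merged files below: the rv32/rv64 pair is collapsed into one test by turning every XLEN-sized i32/i64 scalar operand into an iXLen placeholder, and the RUN lines pipe the file through sed to materialize it per target, exactly as in the vlse.ll hunks that follow. A minimal sketch of the pattern, using a hypothetical @sketch function for illustration; the intrinsic signature mirrors the llvm.riscv.vlse.nxv1i8 declaration in the diff:

; Each RUN line rewrites iXLen before llc sees the IR, so one file
; drives both riscv32 and riscv64 with a single CHECK prefix.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>*, iXLen, iXLen)

define <vscale x 1 x i8> @sketch(<vscale x 1 x i8>* %p, iXLen %stride, iXLen %vl) nounwind {
  ; Passthru is undef; stride and VL are XLEN-sized, so they become
  ; i32 on rv32 and i64 on rv64 after the sed substitution.
  %v = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
    <vscale x 1 x i8> undef, <vscale x 1 x i8>* %p, iXLen %stride, iXLen %vl)
  ret <vscale x 1 x i8> %v
}

Because the generated vlse8.v assembly is identical on both targets, the autogenerated CHECK lines need no per-target prefixes, which is what makes the merge below possible.)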
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlse.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vlse.ll
index 63c94a41dcf5f..b954c2af8e02a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlse-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlse.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vlse_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vlse_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
@@ -17,8 +19,8 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.nxv1i64(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -26,12 +28,12 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
@@ -41,9 +43,9 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -51,10 +53,10 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vlse_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vlse_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
@@ -64,8 +66,8 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.nxv2i64(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -73,12 +75,12 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vlse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
@@ -88,9 +90,9 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vlse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -98,10 +100,10 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vlse_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vlse_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
@@ -111,8 +113,8 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.nxv4i64(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -120,12 +122,12 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vlse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
@@ -135,9 +137,9 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vlse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -145,10 +147,10 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vlse_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vlse_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -158,8 +160,8 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.nxv8i64(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -167,12 +169,12 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vlse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -182,9 +184,9 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vlse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -192,10 +194,10 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vlse_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vlse_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
@@ -205,8 +207,8 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.nxv1f64(
     <vscale x 1 x double> undef,
     <vscale x 1 x double>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -214,12 +216,12 @@ entry:
 declare <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vlse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
@@ -229,9 +231,9 @@ entry:
   %a = call <vscale x 1 x double> @llvm.riscv.vlse.mask.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -239,10 +241,10 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vlse_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vlse_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
@@ -252,8 +254,8 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.nxv2f64(
     <vscale x 2 x double> undef,
     <vscale x 2 x double>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -261,12 +263,12 @@ entry:
 declare <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vlse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
@@ -276,9 +278,9 @@ entry:
   %a = call <vscale x 2 x double> @llvm.riscv.vlse.mask.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -286,10 +288,10 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vlse_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vlse_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
@@ -299,8 +301,8 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.nxv4f64(
     <vscale x 4 x double> undef,
     <vscale x 4 x double>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -308,12 +310,12 @@ entry:
 declare <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vlse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
@@ -323,9 +325,9 @@ entry:
   %a = call <vscale x 4 x double> @llvm.riscv.vlse.mask.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -333,10 +335,10 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vlse_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vlse_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -346,8 +348,8 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.nxv8f64(
     <vscale x 8 x double> undef,
     <vscale x 8 x double>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -355,12 +357,12 @@ entry:
 declare <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vlse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -370,9 +372,9 @@ entry:
   %a = call <vscale x 8 x double> @llvm.riscv.vlse.mask.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -380,10 +382,10 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vlse_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
@@ -393,8 +395,8 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.nxv1i32(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -402,12 +404,12 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vlse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
@@ -417,9 +419,9 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vlse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -427,10 +429,10 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vlse_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vlse_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
@@ -440,8 +442,8 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.nxv2i32(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -449,12 +451,12 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vlse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
@@ -464,9 +466,9 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vlse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -474,10 +476,10 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vlse_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vlse_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
@@ -487,8 +489,8 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.nxv4i32(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -496,12 +498,12 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vlse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
@@ -511,9 +513,9 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vlse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -521,10 +523,10 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vlse_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vlse_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
@@ -534,8 +536,8 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.nxv8i32(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -543,12 +545,12 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vlse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
@@ -558,9 +560,9 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vlse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -568,10 +570,10 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vlse_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vlse_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
@@ -581,8 +583,8 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.nxv16i32(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -590,12 +592,12 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vlse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
@@ -605,9 +607,9 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vlse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -615,10 +617,10 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vlse_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vlse_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
@@ -628,8 +630,8 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.nxv1f32(
     <vscale x 1 x float> undef,
     <vscale x 1 x float>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -637,12 +639,12 @@ entry:
 declare <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vlse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
@@ -652,9 +654,9 @@ entry:
   %a = call <vscale x 1 x float> @llvm.riscv.vlse.mask.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -662,10 +664,10 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vlse_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vlse_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
@@ -675,8 +677,8 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.nxv2f32(
     <vscale x 2 x float> undef,
     <vscale x 2 x float>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -684,12 +686,12 @@ entry:
 declare <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vlse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
@@ -699,9 +701,9 @@ entry:
   %a = call <vscale x 2 x float> @llvm.riscv.vlse.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -709,10 +711,10 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vlse_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vlse_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
@@ -722,8 +724,8 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.nxv4f32(
     <vscale x 4 x float> undef,
     <vscale x 4 x float>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -731,12 +733,12 @@ entry:
 declare <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vlse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
@@ -746,9 +748,9 @@ entry:
   %a = call <vscale x 4 x float> @llvm.riscv.vlse.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -756,10 +758,10 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vlse_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vlse_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
@@ -769,8 +771,8 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.nxv8f32(
     <vscale x 8 x float> undef,
     <vscale x 8 x float>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -778,12 +780,12 @@ entry:
 declare <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vlse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
@@ -793,9 +795,9 @@ entry:
   %a = call <vscale x 8 x float> @llvm.riscv.vlse.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -803,10 +805,10 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vlse_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vlse_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
@@ -816,8 +818,8 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.nxv16f32(
     <vscale x 16 x float> undef,
     <vscale x 16 x float>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -825,12 +827,12 @@ entry:
 declare <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vlse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
@@ -840,9 +842,9 @@ entry:
   %a = call <vscale x 16 x float> @llvm.riscv.vlse.mask.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -850,10 +852,10 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vlse_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vlse_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
@@ -863,8 +865,8 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.nxv1i16(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -872,12 +874,12 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vlse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
@@ -887,9 +889,9 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vlse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -897,10 +899,10 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vlse_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vlse_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
@@ -910,8 +912,8 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.nxv2i16(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -919,12 +921,12 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vlse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
@@ -934,9 +936,9 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vlse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -944,10 +946,10 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vlse_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vlse_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
@@ -957,8 +959,8 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.nxv4i16(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -966,12 +968,12 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vlse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
@@ -981,9 +983,9 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vlse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -991,10 +993,10 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vlse_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vlse_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
@@ -1004,8 +1006,8 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.nxv8i16(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1013,12 +1015,12 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vlse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
@@ -1028,9 +1030,9 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vlse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1038,10 +1040,10 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vlse_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vlse_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
@@ -1051,8 +1053,8 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.nxv16i16(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1060,12 +1062,12 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vlse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
@@ -1075,9 +1077,9 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vlse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1085,10 +1087,10 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vlse_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
@@ -1098,8 +1100,8 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.nxv32i16(
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1107,12 +1109,12 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
-  i64,
+  iXLen,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vlse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
@@ -1122,9 +1124,9 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vlse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1132,10 +1134,10 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vlse_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vlse_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
@@ -1145,8 +1147,8 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.nxv1f16(
     <vscale x 1 x half> undef,
     <vscale x 1 x half>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -1154,12 +1156,12 @@ entry:
 declare <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vlse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
@@ -1169,9 +1171,9 @@ entry:
   %a = call <vscale x 1 x half> @llvm.riscv.vlse.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -1179,10 +1181,10 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vlse_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vlse_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
@@ -1192,8 +1194,8 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.nxv2f16(
     <vscale x 2 x half> undef,
     <vscale x 2 x half>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -1201,12 +1203,12 @@ entry:
 declare <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vlse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
@@ -1216,9 +1218,9 @@ entry:
   %a = call <vscale x 2 x half> @llvm.riscv.vlse.mask.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1226,10 +1228,10 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vlse_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vlse_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
@@ -1239,8 +1241,8 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.nxv4f16(
     <vscale x 4 x half> undef,
     <vscale x 4 x half>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -1248,12 +1250,12 @@ entry:
 declare <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vlse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
@@ -1263,9 +1265,9 @@ entry:
   %a = call <vscale x 4 x half> @llvm.riscv.vlse.mask.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1273,10 +1275,10 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vlse_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vlse_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
@@ -1286,8 +1288,8 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.nxv8f16(
     <vscale x 8 x half> undef,
     <vscale x 8 x half>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -1295,12 +1297,12 @@ entry:
 declare <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vlse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
@@ -1310,9 +1312,9 @@ entry:
   %a = call <vscale x 8 x half> @llvm.riscv.vlse.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1320,10 +1322,10 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vlse_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vlse_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
@@ -1333,8 +1335,8 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.nxv16f16(
     <vscale x 16 x half> undef,
     <vscale x 16 x half>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -1342,12 +1344,12 @@ entry:
 declare <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vlse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
@@ -1357,9 +1359,9 @@ entry:
   %a = call <vscale x 16 x half> @llvm.riscv.vlse.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1367,10 +1369,10 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vlse_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vlse_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
@@ -1380,8 +1382,8 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.nxv32f16(
     <vscale x 32 x half> undef,
     <vscale x 32 x half>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -1389,12 +1391,12 @@ entry:
 declare <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
-  i64,
+  iXLen,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vlse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
@@ -1404,9 +1406,9 @@ entry:
   %a = call <vscale x 32 x half> @llvm.riscv.vlse.mask.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -1414,10 +1416,10 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vlse_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
@@ -1427,8 +1429,8 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.nxv1i8(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1436,12 +1438,12 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vlse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
@@ -1451,9 +1453,9 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vlse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1461,10 +1463,10 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vlse_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
@@ -1474,8 +1476,8 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.nxv2i8(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1483,12 +1485,12 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vlse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
@@ -1498,9 +1500,9 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vlse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1508,10 +1510,10 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vlse_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
@@ -1521,8 +1523,8 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.nxv4i8(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1530,12 +1532,12 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vlse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
@@ -1545,9 +1547,9 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vlse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1555,10 +1557,10 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vlse_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
@@ -1568,8 +1570,8 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.nxv8i8(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1577,12 +1579,12 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vlse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
@@ -1592,9 +1594,9 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vlse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1602,10 +1604,10 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vlse_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
@@ -1615,8 +1617,8 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.nxv16i8(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1624,12 +1626,12 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vlse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
@@ -1639,9 +1641,9 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vlse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1649,10 +1651,10 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vlse_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
@@ -1662,8 +1664,8 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.nxv32i8(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1671,12 +1673,12 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
-  i64,
+  iXLen,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vlse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
@@ -1686,9 +1688,9 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vlse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1696,10 +1698,10 @@ entry:
 declare <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i64 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vlse_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
@@ -1709,8 +1711,8 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.nxv64i8(
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8>* %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -1718,12 +1720,12 @@ entry:
 declare <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
-  i64,
+  iXLen,
   <vscale x 64 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vlse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vlse_mask_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
@@ -1733,9 +1735,9 @@ entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vlse.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }

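For context on the iXLen placeholder appearing throughout the hunks above: iXLen is not an LLVM type. Merged rv32/rv64 test files in this series are normally driven by sed-substituted RUN lines, so a single file covers both XLENs. A minimal sketch of that convention follows; it is an assumption about the added merged files (their actual RUN lines sit at the top of those files, outside this excerpt), and the feature list simply mirrors the one used elsewhere in this patch:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh \
; RUN:   -verify-machineinstrs | FileCheck %s

With that substitution, the scalar VL, stride, and policy operands read as i32 on riscv32 and i64 on riscv64, which is why the hunks above only retype those scalar operands while the CHECK lines are shared between both targets.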
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
index 644b40597c3cc..2f19c9de9761c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv64.ll
@@ -1,6 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh,+f,+d -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
+
+; The intrinsics are not supported with RV32.
+
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -1336,5062 +1339,3 @@ entry:
 
   ret <vscale x 8 x double> %a
 }
-
-declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
-    <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
-    <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
-    <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
-    <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
-    <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
-    <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
-    <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
-    <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
-    <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
-    <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
-    <vscale x 16 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
-    <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
-    <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei32.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
-    <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
-    <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
-    <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
-    <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
-    <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
-    <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
-    <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
-    <vscale x 32 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
-    <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
-    <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
-    <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
-    <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
-    <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
-    <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
-    <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
-    <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i8>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i8>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i8>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i8>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i8>* %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i8>* %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i8>,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> undef,
-    <vscale x 64 x i8>* %0,
-    <vscale x 64 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64,
-  i64);
-
-define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i16>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i16>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i16>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i16>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i16>* %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i16>* %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i32>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i32>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i32>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i32>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i32>* %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i64>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i64>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i64>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i64>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
-    <vscale x 1 x half> undef,
-    <vscale x 1 x half>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
-    <vscale x 2 x half> undef,
-    <vscale x 2 x half>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
-    <vscale x 4 x half> undef,
-    <vscale x 4 x half>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
-    <vscale x 8 x half> undef,
-    <vscale x 8 x half>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
-    <vscale x 16 x half> undef,
-    <vscale x 16 x half>* %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
-    <vscale x 32 x half> undef,
-    <vscale x 32 x half>* %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
-    <vscale x 1 x float> undef,
-    <vscale x 1 x float>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
-    <vscale x 2 x float> undef,
-    <vscale x 2 x float>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
-    <vscale x 4 x float> undef,
-    <vscale x 4 x float>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
-    <vscale x 8 x float> undef,
-    <vscale x 8 x float>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
-    <vscale x 16 x float> undef,
-    <vscale x 16 x float>* %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v9, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
-    <vscale x 1 x double> undef,
-    <vscale x 1 x double>* %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v10, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
-    <vscale x 2 x double> undef,
-    <vscale x 2 x double>* %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v12, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
-    <vscale x 4 x double> undef,
-    <vscale x 4 x double>* %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v16, (a0), v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
-    <vscale x 8 x double> undef,
-    <vscale x 8 x double>* %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x double> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll
similarity index 87%
rename from llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vluxei.ll
index b15e8ab16b678..8772109523380 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei.ll
@@ -1,14 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh,+f,+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -20,7 +22,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -30,10 +32,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i32(
   <vscale x 1 x i8>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -45,7 +47,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -54,9 +56,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -68,7 +70,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -78,10 +80,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i32(
   <vscale x 2 x i8>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -93,7 +95,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -102,9 +104,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -116,7 +118,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -126,10 +128,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i32(
   <vscale x 4 x i8>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -141,7 +143,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -150,9 +152,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -164,7 +166,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -174,10 +176,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i32(
   <vscale x 8 x i8>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -189,7 +191,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -198,9 +200,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -212,7 +214,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8>* %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -222,10 +224,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i32(
   <vscale x 16 x i8>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -237,7 +239,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -246,9 +248,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -260,7 +262,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -270,10 +272,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -285,7 +287,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -294,9 +296,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -308,7 +310,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -318,10 +320,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -333,7 +335,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -342,9 +344,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -356,7 +358,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -366,10 +368,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -381,7 +383,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -390,9 +392,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -404,7 +406,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -414,10 +416,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -429,7 +431,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -438,9 +440,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -452,7 +454,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16>* %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -462,10 +464,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -477,7 +479,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -486,9 +488,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -499,7 +501,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -509,10 +511,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -533,9 +535,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -546,7 +548,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -556,10 +558,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -571,7 +573,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -580,9 +582,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -593,7 +595,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -603,10 +605,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -618,7 +620,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -627,9 +629,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -650,10 +652,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -665,7 +667,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -674,9 +676,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -687,7 +689,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32>* %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -697,10 +699,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -712,7 +714,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -721,9 +723,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -735,7 +737,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -745,10 +747,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -760,7 +762,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -769,9 +771,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -783,7 +785,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -793,10 +795,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -808,7 +810,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -817,9 +819,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -831,7 +833,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -841,10 +843,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -856,7 +858,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -865,9 +867,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -879,7 +881,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -889,10 +891,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -904,7 +906,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -913,9 +915,9 @@ declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -927,7 +929,7 @@ entry:
     <vscale x 1 x half> undef,
     <vscale x 1 x half>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -937,10 +939,10 @@ declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -952,7 +954,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -961,9 +963,9 @@ declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -975,7 +977,7 @@ entry:
     <vscale x 2 x half> undef,
     <vscale x 2 x half>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -985,10 +987,10 @@ declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1000,7 +1002,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -1009,9 +1011,9 @@ declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1023,7 +1025,7 @@ entry:
     <vscale x 4 x half> undef,
     <vscale x 4 x half>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -1033,10 +1035,10 @@ declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i32(
   <vscale x 4 x half>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1048,7 +1050,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -1057,9 +1059,9 @@ declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1071,7 +1073,7 @@ entry:
     <vscale x 8 x half> undef,
     <vscale x 8 x half>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -1081,10 +1083,10 @@ declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i32(
   <vscale x 8 x half>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1096,7 +1098,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -1105,9 +1107,9 @@ declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1119,7 +1121,7 @@ entry:
     <vscale x 16 x half> undef,
     <vscale x 16 x half>* %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -1129,10 +1131,10 @@ declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i32(
   <vscale x 16 x half>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1144,7 +1146,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -1153,9 +1155,9 @@ declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1166,7 +1168,7 @@ entry:
     <vscale x 1 x float> undef,
     <vscale x 1 x float>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -1176,10 +1178,10 @@ declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i32(
   <vscale x 1 x float>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1191,7 +1193,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -1200,9 +1202,9 @@ declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1213,7 +1215,7 @@ entry:
     <vscale x 2 x float> undef,
     <vscale x 2 x float>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -1223,10 +1225,10 @@ declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i32(
   <vscale x 2 x float>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1238,7 +1240,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -1247,9 +1249,9 @@ declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1260,7 +1262,7 @@ entry:
     <vscale x 4 x float> undef,
     <vscale x 4 x float>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -1270,10 +1272,10 @@ declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i32(
   <vscale x 4 x float>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1285,7 +1287,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -1294,9 +1296,9 @@ declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1307,7 +1309,7 @@ entry:
     <vscale x 8 x float> undef,
     <vscale x 8 x float>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -1317,10 +1319,10 @@ declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i32(
   <vscale x 8 x float>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1332,7 +1334,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -1341,9 +1343,9 @@ declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float>* %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1354,7 +1356,7 @@ entry:
     <vscale x 16 x float> undef,
     <vscale x 16 x float>* %0,
     <vscale x 16 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -1364,10 +1366,10 @@ declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i32(
   <vscale x 16 x float>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1379,7 +1381,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -1388,9 +1390,9 @@ declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double>* %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1402,7 +1404,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double>* %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -1412,10 +1414,10 @@ declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i32(
   <vscale x 1 x double>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1427,7 +1429,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -1436,9 +1438,9 @@ declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double>* %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1450,7 +1452,7 @@ entry:
     <vscale x 2 x double> undef,
     <vscale x 2 x double>* %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -1460,10 +1462,10 @@ declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i32(
   <vscale x 2 x double>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1475,7 +1477,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -1484,9 +1486,9 @@ declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double>* %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1498,7 +1500,7 @@ entry:
     <vscale x 4 x double> undef,
     <vscale x 4 x double>* %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -1508,10 +1510,10 @@ declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i32(
   <vscale x 4 x double>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1523,7 +1525,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -1532,9 +1534,9 @@ declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double>* %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1546,7 +1548,7 @@ entry:
     <vscale x 8 x double> undef,
     <vscale x 8 x double>* %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -1556,10 +1558,10 @@ declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i32(
   <vscale x 8 x double>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1571,7 +1573,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -1580,9 +1582,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1594,7 +1596,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1604,10 +1606,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1619,7 +1621,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -1628,9 +1630,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1642,7 +1644,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1652,10 +1654,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1667,7 +1669,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -1676,9 +1678,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1690,7 +1692,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1700,10 +1702,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1715,7 +1717,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -1724,9 +1726,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1738,7 +1740,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1748,10 +1750,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1763,7 +1765,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1772,9 +1774,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1786,7 +1788,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8>* %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1796,10 +1798,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1811,7 +1813,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1820,9 +1822,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1834,7 +1836,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8>* %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1844,10 +1846,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1859,7 +1861,7 @@ entry:
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1868,9 +1870,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1881,7 +1883,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1891,10 +1893,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1906,7 +1908,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1915,9 +1917,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1928,7 +1930,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1938,10 +1940,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1953,7 +1955,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1962,9 +1964,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1975,7 +1977,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1985,10 +1987,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2000,7 +2002,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -2009,9 +2011,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2022,7 +2024,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -2032,10 +2034,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2047,7 +2049,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -2056,9 +2058,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2069,7 +2071,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16>* %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -2079,10 +2081,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2094,7 +2096,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -2103,9 +2105,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2116,7 +2118,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16>* %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -2126,10 +2128,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2141,7 +2143,7 @@ entry:
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -2150,9 +2152,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2164,7 +2166,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -2174,10 +2176,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2189,7 +2191,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -2198,9 +2200,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2212,7 +2214,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -2222,10 +2224,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2237,7 +2239,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -2246,9 +2248,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2260,7 +2262,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -2270,10 +2272,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2285,7 +2287,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -2294,9 +2296,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2308,7 +2310,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -2318,10 +2320,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2333,7 +2335,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -2342,9 +2344,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2356,7 +2358,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32>* %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -2366,10 +2368,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2381,7 +2383,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -2390,9 +2392,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2404,7 +2406,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -2414,10 +2416,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i16(
   <vscale x 1 x i64>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2429,7 +2431,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -2438,9 +2440,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -2452,7 +2454,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -2462,10 +2464,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i16(
   <vscale x 2 x i64>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -2477,7 +2479,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -2486,9 +2488,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2500,7 +2502,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2510,10 +2512,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i16(
   <vscale x 4 x i64>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2525,7 +2527,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -2534,9 +2536,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2548,7 +2550,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2558,10 +2560,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i16(
   <vscale x 8 x i64>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2573,7 +2575,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -2582,9 +2584,9 @@ declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -2595,7 +2597,7 @@ entry:
     <vscale x 1 x half> undef,
     <vscale x 1 x half>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -2605,10 +2607,10 @@ declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i16(
   <vscale x 1 x half>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -2620,7 +2622,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -2629,9 +2631,9 @@ declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -2642,7 +2644,7 @@ entry:
     <vscale x 2 x half> undef,
     <vscale x 2 x half>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -2652,10 +2654,10 @@ declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i16(
   <vscale x 2 x half>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -2667,7 +2669,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -2676,9 +2678,9 @@ declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2689,7 +2691,7 @@ entry:
     <vscale x 4 x half> undef,
     <vscale x 4 x half>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -2699,10 +2701,10 @@ declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i16(
   <vscale x 4 x half>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2714,7 +2716,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -2723,9 +2725,9 @@ declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2736,7 +2738,7 @@ entry:
     <vscale x 8 x half> undef,
     <vscale x 8 x half>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -2746,10 +2748,10 @@ declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i16(
   <vscale x 8 x half>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2761,7 +2763,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -2770,9 +2772,9 @@ declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2783,7 +2785,7 @@ entry:
     <vscale x 16 x half> undef,
     <vscale x 16 x half>* %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -2793,10 +2795,10 @@ declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i16(
   <vscale x 16 x half>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2808,7 +2810,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -2817,9 +2819,9 @@ declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half>* %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2830,7 +2832,7 @@ entry:
     <vscale x 32 x half> undef,
     <vscale x 32 x half>* %0,
     <vscale x 32 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -2840,10 +2842,10 @@ declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i16(
   <vscale x 32 x half>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2855,7 +2857,7 @@ entry:
     <vscale x 32 x half>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -2864,9 +2866,9 @@ declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2878,7 +2880,7 @@ entry:
     <vscale x 1 x float> undef,
     <vscale x 1 x float>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -2888,10 +2890,10 @@ declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i16(
   <vscale x 1 x float>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2903,7 +2905,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -2912,9 +2914,9 @@ declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2926,7 +2928,7 @@ entry:
     <vscale x 2 x float> undef,
     <vscale x 2 x float>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -2936,10 +2938,10 @@ declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i16(
   <vscale x 2 x float>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2951,7 +2953,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -2960,9 +2962,9 @@ declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2974,7 +2976,7 @@ entry:
     <vscale x 4 x float> undef,
     <vscale x 4 x float>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -2984,10 +2986,10 @@ declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i16(
   <vscale x 4 x float>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2999,7 +3001,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -3008,9 +3010,9 @@ declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -3022,7 +3024,7 @@ entry:
     <vscale x 8 x float> undef,
     <vscale x 8 x float>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -3032,10 +3034,10 @@ declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i16(
   <vscale x 8 x float>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -3047,7 +3049,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -3056,9 +3058,9 @@ declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float>* %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -3070,7 +3072,7 @@ entry:
     <vscale x 16 x float> undef,
     <vscale x 16 x float>* %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -3080,10 +3082,10 @@ declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i16(
   <vscale x 16 x float>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -3095,7 +3097,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -3104,9 +3106,9 @@ declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double>* %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -3118,7 +3120,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double>* %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -3128,10 +3130,10 @@ declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i16(
   <vscale x 1 x double>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -3143,7 +3145,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -3152,9 +3154,9 @@ declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double>* %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -3166,7 +3168,7 @@ entry:
     <vscale x 2 x double> undef,
     <vscale x 2 x double>* %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -3176,10 +3178,10 @@ declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i16(
   <vscale x 2 x double>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -3191,7 +3193,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -3200,9 +3202,9 @@ declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double>* %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -3214,7 +3216,7 @@ entry:
     <vscale x 4 x double> undef,
     <vscale x 4 x double>* %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -3224,10 +3226,10 @@ declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i16(
   <vscale x 4 x double>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -3239,7 +3241,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -3248,9 +3250,9 @@ declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double>* %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -3262,7 +3264,7 @@ entry:
     <vscale x 8 x double> undef,
     <vscale x 8 x double>* %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -3272,10 +3274,10 @@ declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i16(
   <vscale x 8 x double>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -3287,7 +3289,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }
@@ -3296,9 +3298,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -3309,7 +3311,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i8>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -3319,10 +3321,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -3334,7 +3336,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -3343,9 +3345,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -3356,7 +3358,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i8>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -3366,10 +3368,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -3381,7 +3383,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -3390,9 +3392,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -3403,7 +3405,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i8>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -3413,10 +3415,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -3428,7 +3430,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -3437,9 +3439,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -3450,7 +3452,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i8>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -3460,10 +3462,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -3475,7 +3477,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -3484,9 +3486,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vluxei.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -3497,7 +3499,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i8>* %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -3507,10 +3509,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vluxei.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -3522,7 +3524,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -3531,9 +3533,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vluxei.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -3544,7 +3546,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i8>* %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -3554,10 +3556,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vluxei.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -3569,7 +3571,7 @@ entry:
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -3578,9 +3580,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vluxei.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3591,7 +3593,7 @@ entry:
     <vscale x 64 x i8> undef,
     <vscale x 64 x i8>* %0,
     <vscale x 64 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 64 x i8> %a
 }
@@ -3601,10 +3603,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vluxei.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>*,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3616,7 +3618,7 @@ entry:
     <vscale x 64 x i8>* %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 64 x i8> %a
 }
@@ -3625,9 +3627,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -3639,7 +3641,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i16>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -3649,10 +3651,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -3664,7 +3666,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -3673,9 +3675,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -3687,7 +3689,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i16>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -3697,10 +3699,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -3712,7 +3714,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -3721,9 +3723,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -3735,7 +3737,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i16>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -3745,10 +3747,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -3760,7 +3762,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -3769,9 +3771,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -3783,7 +3785,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i16>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -3793,10 +3795,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -3808,7 +3810,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -3817,9 +3819,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vluxei.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -3831,7 +3833,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i16>* %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -3841,10 +3843,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vluxei.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -3856,7 +3858,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -3865,9 +3867,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vluxei.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -3879,7 +3881,7 @@ entry:
     <vscale x 32 x i16> undef,
     <vscale x 32 x i16>* %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i16> %a
 }
@@ -3889,10 +3891,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vluxei.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -3904,7 +3906,7 @@ entry:
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -3913,9 +3915,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -3927,7 +3929,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i32>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -3937,10 +3939,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i8(
   <vscale x 1 x i32>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -3952,7 +3954,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -3961,9 +3963,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -3975,7 +3977,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i32>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -3985,10 +3987,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i8(
   <vscale x 2 x i32>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4000,7 +4002,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -4009,9 +4011,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4023,7 +4025,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i32>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -4033,10 +4035,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i8(
   <vscale x 4 x i32>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4048,7 +4050,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -4057,9 +4059,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4071,7 +4073,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i32>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -4081,10 +4083,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i8(
   <vscale x 8 x i32>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4096,7 +4098,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -4105,9 +4107,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vluxei.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4119,7 +4121,7 @@ entry:
     <vscale x 16 x i32> undef,
     <vscale x 16 x i32>* %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i32> %a
 }
@@ -4129,10 +4131,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vluxei.mask.nxv16i32.nxv16i8(
   <vscale x 16 x i32>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4144,7 +4146,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -4153,9 +4155,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4167,7 +4169,7 @@ entry:
     <vscale x 1 x i64> undef,
     <vscale x 1 x i64>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i64> %a
 }
@@ -4177,10 +4179,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i8(
   <vscale x 1 x i64>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4192,7 +4194,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -4201,9 +4203,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4215,7 +4217,7 @@ entry:
     <vscale x 2 x i64> undef,
     <vscale x 2 x i64>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i64> %a
 }
@@ -4225,10 +4227,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i8(
   <vscale x 2 x i64>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4240,7 +4242,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -4249,9 +4251,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4263,7 +4265,7 @@ entry:
     <vscale x 4 x i64> undef,
     <vscale x 4 x i64>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i64> %a
 }
@@ -4273,10 +4275,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i8(
   <vscale x 4 x i64>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4288,7 +4290,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -4297,9 +4299,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4311,7 +4313,7 @@ entry:
     <vscale x 8 x i64> undef,
     <vscale x 8 x i64>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i64> %a
 }
@@ -4321,10 +4323,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i8(
   <vscale x 8 x i64>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4336,7 +4338,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -4345,9 +4347,9 @@ declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -4359,7 +4361,7 @@ entry:
     <vscale x 1 x half> undef,
     <vscale x 1 x half>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x half> %a
 }
@@ -4369,10 +4371,10 @@ declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i8(
   <vscale x 1 x half>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -4384,7 +4386,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x half> %a
 }
@@ -4393,9 +4395,9 @@ declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -4407,7 +4409,7 @@ entry:
     <vscale x 2 x half> undef,
     <vscale x 2 x half>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x half> %a
 }
@@ -4417,10 +4419,10 @@ declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i8(
   <vscale x 2 x half>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -4432,7 +4434,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x half> %a
 }
@@ -4441,9 +4443,9 @@ declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -4455,7 +4457,7 @@ entry:
     <vscale x 4 x half> undef,
     <vscale x 4 x half>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x half> %a
 }
@@ -4465,10 +4467,10 @@ declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i8(
   <vscale x 4 x half>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -4480,7 +4482,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x half> %a
 }
@@ -4489,9 +4491,9 @@ declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -4503,7 +4505,7 @@ entry:
     <vscale x 8 x half> undef,
     <vscale x 8 x half>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x half> %a
 }
@@ -4513,10 +4515,10 @@ declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i8(
   <vscale x 8 x half>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -4528,7 +4530,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x half> %a
 }
@@ -4537,9 +4539,9 @@ declare <vscale x 16 x half> @llvm.riscv.vluxei.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -4551,7 +4553,7 @@ entry:
     <vscale x 16 x half> undef,
     <vscale x 16 x half>* %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x half> %a
 }
@@ -4561,10 +4563,10 @@ declare <vscale x 16 x half> @llvm.riscv.vluxei.mask.nxv16f16.nxv16i8(
   <vscale x 16 x half>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -4576,7 +4578,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x half> %a
 }
@@ -4585,9 +4587,9 @@ declare <vscale x 32 x half> @llvm.riscv.vluxei.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half>* %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -4599,7 +4601,7 @@ entry:
     <vscale x 32 x half> undef,
     <vscale x 32 x half>* %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x half> %a
 }
@@ -4609,10 +4611,10 @@ declare <vscale x 32 x half> @llvm.riscv.vluxei.mask.nxv32f16.nxv32i8(
   <vscale x 32 x half>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -4624,7 +4626,7 @@ entry:
     <vscale x 32 x half>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x half> %a
 }
@@ -4633,9 +4635,9 @@ declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -4647,7 +4649,7 @@ entry:
     <vscale x 1 x float> undef,
     <vscale x 1 x float>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x float> %a
 }
@@ -4657,10 +4659,10 @@ declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i8(
   <vscale x 1 x float>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -4672,7 +4674,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x float> %a
 }
@@ -4681,9 +4683,9 @@ declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4695,7 +4697,7 @@ entry:
     <vscale x 2 x float> undef,
     <vscale x 2 x float>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x float> %a
 }
@@ -4705,10 +4707,10 @@ declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i8(
   <vscale x 2 x float>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4720,7 +4722,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x float> %a
 }
@@ -4729,9 +4731,9 @@ declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4743,7 +4745,7 @@ entry:
     <vscale x 4 x float> undef,
     <vscale x 4 x float>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x float> %a
 }
@@ -4753,10 +4755,10 @@ declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i8(
   <vscale x 4 x float>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4768,7 +4770,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x float> %a
 }
@@ -4777,9 +4779,9 @@ declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4791,7 +4793,7 @@ entry:
     <vscale x 8 x float> undef,
     <vscale x 8 x float>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x float> %a
 }
@@ -4801,10 +4803,10 @@ declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i8(
   <vscale x 8 x float>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4816,7 +4818,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x float> %a
 }
@@ -4825,9 +4827,9 @@ declare <vscale x 16 x float> @llvm.riscv.vluxei.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float>* %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4839,7 +4841,7 @@ entry:
     <vscale x 16 x float> undef,
     <vscale x 16 x float>* %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x float> %a
 }
@@ -4849,10 +4851,10 @@ declare <vscale x 16 x float> @llvm.riscv.vluxei.mask.nxv16f32.nxv16i8(
   <vscale x 16 x float>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4864,7 +4866,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x float> %a
 }
@@ -4873,9 +4875,9 @@ declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double>* %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4887,7 +4889,7 @@ entry:
     <vscale x 1 x double> undef,
     <vscale x 1 x double>* %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x double> %a
 }
@@ -4897,10 +4899,10 @@ declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i8(
   <vscale x 1 x double>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4912,7 +4914,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x double> %a
 }
@@ -4921,9 +4923,9 @@ declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double>* %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4935,7 +4937,7 @@ entry:
     <vscale x 2 x double> undef,
     <vscale x 2 x double>* %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x double> %a
 }
@@ -4945,10 +4947,10 @@ declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i8(
   <vscale x 2 x double>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4960,7 +4962,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x double> %a
 }
@@ -4969,9 +4971,9 @@ declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double>* %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4983,7 +4985,7 @@ entry:
     <vscale x 4 x double> undef,
     <vscale x 4 x double>* %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x double> %a
 }
@@ -4993,10 +4995,10 @@ declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i8(
   <vscale x 4 x double>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -5008,7 +5010,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x double> %a
 }
@@ -5017,9 +5019,9 @@ declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double>* %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -5031,7 +5033,7 @@ entry:
     <vscale x 8 x double> undef,
     <vscale x 8 x double>* %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x double> %a
 }
@@ -5041,10 +5043,10 @@ declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i8(
   <vscale x 8 x double>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -5056,7 +5058,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
deleted file mode 100644
index 9452abe171184..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv32.ll
+++ /dev/null
@@ -1,1934 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vnclip.wv v11, v8, v10
-; CHECK-NEXT:    vmv.v.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vnclip.wv v14, v8, v12
-; CHECK-NEXT:    vmv.v.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vnclip.wv v20, v8, v16
-; CHECK-NEXT:    vmv.v.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vnclip.wv v11, v8, v10
-; CHECK-NEXT:    vmv.v.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vnclip.wv v14, v8, v12
-; CHECK-NEXT:    vmv.v.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vnclip.wv v20, v8, v16
-; CHECK-NEXT:    vmv.v.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vnclip.wv v11, v8, v10
-; CHECK-NEXT:    vmv.v.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vnclip.wv v14, v8, v12
-; CHECK-NEXT:    vmv.v.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vnclip.wv v20, v8, v16
-; CHECK-NEXT:    vmv.v.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vnclip.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i16>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i16>,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i16> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i16>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i16>,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i16> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i16> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i16>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vnclip.wx v10, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i16>,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i16> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i16>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vnclip.wx v12, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i16>,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i8> @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i16> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i16>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vnclip.wx v16, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i16> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i16>,
-  i32,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i8> @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i16> %1,
-    i32 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i32> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i32> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vnclip.wx v10, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i32> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vnclip.wx v12, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i32> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vnclip.wx v16, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i32> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i32,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i32> %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i32,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vnclip.wx v10, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i32,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vnclip.wx v12, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i32,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vnclip.wx v16, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i64> %0,
-    i32 %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i32,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vnclip.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i16> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i16> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i16> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vnclip.wi v10, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i16> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vnclip.wi v12, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i16> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vnclip.wi v16, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i16> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i16> %1,
-    i32 9,
-    <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i32> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i32> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vnclip.wi v10, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i32> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vnclip.wi v12, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i32> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vnclip.wi v16, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i32> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i32> %1,
-    i32 9,
-    <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    i32 9,
-    <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vnclip.wi v10, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    i32 9,
-    <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vnclip.wi v12, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    i32 9,
-    <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vnclip.wi v16, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i64> %0,
-    i32 9,
-    i32 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vnclip.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    i32 9,
-    <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll
similarity index 84%
rename from llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vnclip.ll
index df8def1426f02..72f5bb4856030 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclip-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclip.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i8_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -160,7 +162,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -170,10 +172,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i8_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -185,7 +187,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -194,9 +196,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i8_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -208,7 +210,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -218,10 +220,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i8_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -233,7 +235,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -242,9 +244,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv32i8_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -256,7 +258,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -266,10 +268,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv32i8_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -281,7 +283,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -290,9 +292,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i16_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -303,7 +305,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -313,10 +315,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i16_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -328,7 +330,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -337,9 +339,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i16_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -350,7 +352,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i16_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -375,7 +377,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -384,9 +386,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i16_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -398,7 +400,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -408,10 +410,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i16_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -423,7 +425,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -432,9 +434,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i16_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -446,7 +448,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -456,10 +458,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i16_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -471,7 +473,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -480,9 +482,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv16i16_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -494,7 +496,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -504,10 +506,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv16i16_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -519,7 +521,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -528,9 +530,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv1i32_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -541,7 +543,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -551,10 +553,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i32_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -566,7 +568,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -575,9 +577,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv2i32_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -589,7 +591,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -599,10 +601,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv2i32_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -614,7 +616,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -623,9 +625,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv4i32_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -637,7 +639,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -647,10 +649,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv4i32_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -671,9 +673,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_wv_nxv8i32_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -685,7 +687,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
-    i64 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -695,10 +697,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv8i32_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -710,7 +712,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -718,10 +720,10 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclip_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -731,8 +733,8 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -740,12 +742,12 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -755,9 +757,9 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -765,10 +767,10 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclip_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -778,8 +780,8 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -787,12 +789,12 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -802,9 +804,9 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -812,10 +814,10 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclip_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -825,8 +827,8 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -834,12 +836,12 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -849,9 +851,9 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -859,10 +861,10 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclip_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -873,8 +875,8 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -882,12 +884,12 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -897,9 +899,9 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -907,10 +909,10 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclip_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -921,8 +923,8 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -930,12 +932,12 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -945,9 +947,9 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -955,10 +957,10 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclip_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -969,8 +971,8 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i16> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -978,12 +980,12 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i64,
+  iXLen,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -993,9 +995,9 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1003,10 +1005,10 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclip_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1016,8 +1018,8 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i32> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1025,12 +1027,12 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1040,9 +1042,9 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1050,10 +1052,10 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclip_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1063,8 +1065,8 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i32> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1072,12 +1074,12 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1087,9 +1089,9 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1097,10 +1099,10 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclip_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1111,8 +1113,8 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i32> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1120,12 +1122,12 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1135,9 +1137,9 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1145,10 +1147,10 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclip_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1159,8 +1161,8 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i32> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1168,12 +1170,12 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1183,9 +1185,9 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1193,10 +1195,10 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclip_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1207,8 +1209,8 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i32> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1216,12 +1218,12 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1231,9 +1233,9 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1241,10 +1243,10 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclip_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1254,8 +1256,8 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1263,12 +1265,12 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1278,9 +1280,9 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1288,10 +1290,10 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
   <vscale x 2 x i64>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclip_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1302,8 +1304,8 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1311,12 +1313,12 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
   <vscale x 2 x i64>,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1326,9 +1328,9 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1336,10 +1338,10 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
   <vscale x 4 x i64>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclip_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1350,8 +1352,8 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1359,12 +1361,12 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
   <vscale x 4 x i64>,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1374,9 +1376,9 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1384,10 +1386,10 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
   <vscale x 8 x i64>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclip_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1398,8 +1400,8 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1407,12 +1409,12 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
   <vscale x 8 x i64>,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1422,14 +1424,14 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -1439,13 +1441,13 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.nxv1i8.nxv1i16(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -1455,14 +1457,14 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -1472,13 +1474,13 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.nxv2i8.nxv2i16(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -1488,14 +1490,14 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclip.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -1505,13 +1507,13 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.nxv4i8.nxv4i16(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -1521,14 +1523,14 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclip.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -1539,13 +1541,13 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.nxv8i8.nxv8i16(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -1555,14 +1557,14 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclip.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -1573,13 +1575,13 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.nxv16i8.nxv16i16(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -1589,14 +1591,14 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclip.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -1607,13 +1609,13 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.nxv32i8.nxv32i16(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i16> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -1623,14 +1625,14 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclip.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1640,13 +1642,13 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.nxv1i16.nxv1i32(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i32> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1656,14 +1658,14 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclip.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1673,13 +1675,13 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.nxv2i16.nxv2i32(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i32> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1689,14 +1691,14 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclip.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1707,13 +1709,13 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.nxv4i16.nxv4i32(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i32> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1723,14 +1725,14 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclip.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1741,13 +1743,13 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.nxv8i16.nxv8i32(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i32> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1757,14 +1759,14 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclip.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1775,13 +1777,13 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.nxv16i16.nxv16i32(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i32> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1791,14 +1793,14 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclip.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1808,13 +1810,13 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.nxv1i32.nxv1i64(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i64> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1824,14 +1826,14 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclip.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1842,13 +1844,13 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.nxv2i32.nxv2i64(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i64> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1858,14 +1860,14 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclip.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1876,13 +1878,13 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1892,14 +1894,14 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclip.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1910,13 +1912,13 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.nxv8i32.nxv8i64(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i64> %0,
-    i64 9,
-    i64 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclip_mask_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1926,9 +1928,9 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclip.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i64 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
deleted file mode 100644
index 4a41fe987c2d1..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv64.ll
+++ /dev/null
@@ -1,1934 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wv v11, v8, v10
-; CHECK-NEXT:    vmv.v.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v14, v8, v12
-; CHECK-NEXT:    vmv.v.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wv v20, v8, v16
-; CHECK-NEXT:    vmv.v.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i8> %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wv v11, v8, v10
-; CHECK-NEXT:    vmv.v.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v14, v8, v12
-; CHECK-NEXT:    vmv.v.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wv v20, v8, v16
-; CHECK-NEXT:    vmv.v.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v9, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wv v11, v8, v10
-; CHECK-NEXT:    vmv.v.v v8, v11
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v10, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v14, v8, v12
-; CHECK-NEXT:    vmv.v.v v8, v14
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v12, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wv v20, v8, v16
-; CHECK-NEXT:    vmv.v.v v8, v20
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wv v8, v16, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i16>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i16>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i16> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i16>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i16>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i16> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i16>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i16> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i16>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wx v10, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i16>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i16> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i16>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v12, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i16>,
-  i64,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i16> %1,
-    i64 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i16>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wx v16, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i16> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i16>,
-  i64,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i16> %1,
-    i64 %2,
-    <vscale x 32 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i32>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i32> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i32>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i32> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wx v10, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i32>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i32> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v12, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i32>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i32> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wx v16, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i32> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i32>,
-  i64,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i32> %1,
-    i64 %2,
-    <vscale x 16 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v8, a0
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i64>,
-  i64,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v9, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    i64 %2,
-    <vscale x 1 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wx v10, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i64>,
-  i64,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v10, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    i64 %2,
-    <vscale x 2 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v12, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i64>,
-  i64,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v12, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    i64 %2,
-    <vscale x 4 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wx v16, v8, a0
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i64> %0,
-    i64 %1,
-    i64 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i64>,
-  i64,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wx v8, v16, a0, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    i64 %2,
-    <vscale x 8 x i1> %3,
-    i64 %4, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> undef,
-    <vscale x 1 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i16> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> undef,
-    <vscale x 2 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i16> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> undef,
-    <vscale x 4 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i16> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wi v10, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> undef,
-    <vscale x 8 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i16> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v12, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> undef,
-    <vscale x 16 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i16> %1,
-    i64 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wi v16, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> undef,
-    <vscale x 32 x i16> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i16> %1,
-    i64 9,
-    <vscale x 32 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i32> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i32> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wi v10, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i32> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v12, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i32> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wi v16, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i32> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i32> %1,
-    i64 9,
-    <vscale x 16 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v8, 9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v9, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i64> %1,
-    i64 9,
-    <vscale x 1 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wi v10, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v10, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i64> %1,
-    i64 9,
-    <vscale x 2 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v12, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v12, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i64> %1,
-    i64 9,
-    <vscale x 4 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wi v16, v8, 9
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i64> %0,
-    i64 9,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vnclipu.wi v8, v16, 9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i64> %1,
-    i64 9,
-    <vscale x 8 x i1> %2,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll
similarity index 84%
rename from llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vnclipu.ll
index 7d368f9a0ca2a..83c47ebabd022 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnclipu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnclipu.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
     <vscale x 1 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -28,10 +30,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -43,7 +45,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -52,9 +54,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -65,7 +67,7 @@ entry:
     <vscale x 2 x i8> undef,
     <vscale x 2 x i16> %0,
     <vscale x 2 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -75,10 +77,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -90,7 +92,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -99,9 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -112,7 +114,7 @@ entry:
     <vscale x 4 x i8> undef,
     <vscale x 4 x i16> %0,
     <vscale x 4 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -122,10 +124,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -137,7 +139,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -146,9 +148,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i8_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -160,7 +162,7 @@ entry:
     <vscale x 8 x i8> undef,
     <vscale x 8 x i16> %0,
     <vscale x 8 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -170,10 +172,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i8_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -185,7 +187,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -194,9 +196,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i8_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -208,7 +210,7 @@ entry:
     <vscale x 16 x i8> undef,
     <vscale x 16 x i16> %0,
     <vscale x 16 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -218,10 +220,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i8_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -233,7 +235,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -242,9 +244,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv32i8_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -256,7 +258,7 @@ entry:
     <vscale x 32 x i8> undef,
     <vscale x 32 x i16> %0,
     <vscale x 32 x i8> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -266,10 +268,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv32i8_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -281,7 +283,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -290,9 +292,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i16_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -303,7 +305,7 @@ entry:
     <vscale x 1 x i16> undef,
     <vscale x 1 x i32> %0,
     <vscale x 1 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -313,10 +315,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i16_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -328,7 +330,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -337,9 +339,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i16_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -350,7 +352,7 @@ entry:
     <vscale x 2 x i16> undef,
     <vscale x 2 x i32> %0,
     <vscale x 2 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -360,10 +362,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i16_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -375,7 +377,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -384,9 +386,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i16_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -398,7 +400,7 @@ entry:
     <vscale x 4 x i16> undef,
     <vscale x 4 x i32> %0,
     <vscale x 4 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -408,10 +410,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i16_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -423,7 +425,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -432,9 +434,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i16_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -446,7 +448,7 @@ entry:
     <vscale x 8 x i16> undef,
     <vscale x 8 x i32> %0,
     <vscale x 8 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -456,10 +458,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i16_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -471,7 +473,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -480,9 +482,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv16i16_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -494,7 +496,7 @@ entry:
     <vscale x 16 x i16> undef,
     <vscale x 16 x i32> %0,
     <vscale x 16 x i16> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -504,10 +506,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv16i16_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -519,7 +521,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -528,9 +530,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv1i32_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -541,7 +543,7 @@ entry:
     <vscale x 1 x i32> undef,
     <vscale x 1 x i64> %0,
     <vscale x 1 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -551,10 +553,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv1i32_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -566,7 +568,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -575,9 +577,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv2i32_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -589,7 +591,7 @@ entry:
     <vscale x 2 x i32> undef,
     <vscale x 2 x i64> %0,
     <vscale x 2 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -599,10 +601,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv2i32_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -614,7 +616,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -623,9 +625,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv4i32_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -637,7 +639,7 @@ entry:
     <vscale x 4 x i32> undef,
     <vscale x 4 x i64> %0,
     <vscale x 4 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -647,10 +649,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv4i32_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -671,9 +673,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_wv_nxv8i32_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -685,7 +687,7 @@ entry:
     <vscale x 8 x i32> undef,
     <vscale x 8 x i64> %0,
     <vscale x 8 x i32> %1,
-    i32 %2)
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -695,10 +697,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_wv_nxv8i32_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -710,7 +712,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -718,10 +720,10 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclipu_vx_nxv1i8_nxv1i16(<vscale x 1 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -731,8 +733,8 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i8> %a
 }
@@ -740,12 +742,12 @@ entry:
 declare <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i16>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -755,9 +757,9 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
@@ -765,10 +767,10 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclipu_vx_nxv2i8_nxv2i16(<vscale x 2 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -778,8 +780,8 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i8> %a
 }
@@ -787,12 +789,12 @@ entry:
 declare <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i16>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -802,9 +804,9 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
@@ -812,10 +814,10 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclipu_vx_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -825,8 +827,8 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i8> %a
 }
@@ -834,12 +836,12 @@ entry:
 declare <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i16>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -849,9 +851,9 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
@@ -859,10 +861,10 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclipu_vx_nxv8i8_nxv8i16(<vscale x 8 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -873,8 +875,8 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i8> %a
 }
@@ -882,12 +884,12 @@ entry:
 declare <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i16>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -897,9 +899,9 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
@@ -907,10 +909,10 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclipu_vx_nxv16i8_nxv16i16(<vscale x 16 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -921,8 +923,8 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i8> %a
 }
@@ -930,12 +932,12 @@ entry:
 declare <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i16>,
-  i32,
+  iXLen,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -945,9 +947,9 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
@@ -955,10 +957,10 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclipu_vx_nxv32i8_nxv32i16(<vscale x 32 x i16> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -969,8 +971,8 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i16> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 32 x i8> %a
 }
@@ -978,12 +980,12 @@ entry:
 declare <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i16>,
-  i32,
+  iXLen,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -993,9 +995,9 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1003,10 +1005,10 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclipu_vx_nxv1i16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1016,8 +1018,8 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1025,12 +1027,12 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i32>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1040,9 +1042,9 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1050,10 +1052,10 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclipu_vx_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1063,8 +1065,8 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1072,12 +1074,12 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i32>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1087,9 +1089,9 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1097,10 +1099,10 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclipu_vx_nxv4i16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1111,8 +1113,8 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1120,12 +1122,12 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i32>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1135,9 +1137,9 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1145,10 +1147,10 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclipu_vx_nxv8i16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1159,8 +1161,8 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1168,12 +1170,12 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i32>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1183,9 +1185,9 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1193,10 +1195,10 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclipu_vx_nxv16i16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1207,8 +1209,8 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i32> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1216,12 +1218,12 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i32>,
-  i32,
+  iXLen,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1231,9 +1233,9 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1241,10 +1243,10 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclipu_vx_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1254,8 +1256,8 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1263,12 +1265,12 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
   <vscale x 1 x i32>,
   <vscale x 1 x i64>,
-  i32,
+  iXLen,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1278,9 +1280,9 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1288,10 +1290,10 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
   <vscale x 2 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclipu_vx_nxv2i32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1302,8 +1304,8 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1311,12 +1313,12 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
   <vscale x 2 x i32>,
   <vscale x 2 x i64>,
-  i32,
+  iXLen,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1326,9 +1328,9 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1336,10 +1338,10 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
   <vscale x 4 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclipu_vx_nxv4i32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1350,8 +1352,8 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1359,12 +1361,12 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
   <vscale x 4 x i32>,
   <vscale x 4 x i64>,
-  i32,
+  iXLen,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1374,9 +1376,9 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1384,10 +1386,10 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
   <vscale x 8 x i64>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclipu_vx_nxv8i32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1398,8 +1400,8 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i64> %0,
-    i32 %1,
-    i32 %2)
+    iXLen %1,
+    iXLen %2)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1407,12 +1409,12 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
   <vscale x 8 x i32>,
   <vscale x 8 x i64>,
-  i32,
+  iXLen,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vx_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1422,14 +1424,14 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i32 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i32 %4, i32 1)
+    iXLen %4, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -1439,13 +1441,13 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.nxv1i8.nxv1i16(
     <vscale x 1 x i8> undef,
     <vscale x 1 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i8_nxv1i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
@@ -1455,14 +1457,14 @@ entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vnclipu.mask.nxv1i8.nxv1i16(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -1472,13 +1474,13 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.nxv2i8.nxv2i16(
     <vscale x 2 x i8> undef,
     <vscale x 2 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i8_nxv2i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
@@ -1488,14 +1490,14 @@ entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vnclipu.mask.nxv2i8.nxv2i16(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -1505,13 +1507,13 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.nxv4i8.nxv4i16(
     <vscale x 4 x i8> undef,
     <vscale x 4 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i8_nxv4i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
@@ -1521,14 +1523,14 @@ entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vnclipu.mask.nxv4i8.nxv4i16(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -1539,13 +1541,13 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.nxv8i8.nxv8i16(
     <vscale x 8 x i8> undef,
     <vscale x 8 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i8_nxv8i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
@@ -1555,14 +1557,14 @@ entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vnclipu.mask.nxv8i8.nxv8i16(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -1573,13 +1575,13 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.nxv16i8.nxv16i16(
     <vscale x 16 x i8> undef,
     <vscale x 16 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i8_nxv16i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
@@ -1589,14 +1591,14 @@ entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vnclipu.mask.nxv16i8.nxv16i16(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -1607,13 +1609,13 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.nxv32i8.nxv32i16(
     <vscale x 32 x i8> undef,
     <vscale x 32 x i16> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv32i8_nxv32i16_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
@@ -1623,14 +1625,14 @@ entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vnclipu.mask.nxv32i8.nxv32i16(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i16> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 32 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1640,13 +1642,13 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.nxv1i16.nxv1i32(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i16_nxv1i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1656,14 +1658,14 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vnclipu.mask.nxv1i16.nxv1i32(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1673,13 +1675,13 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.nxv2i16.nxv2i32(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i16_nxv2i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1689,14 +1691,14 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vnclipu.mask.nxv2i16.nxv2i32(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1707,13 +1709,13 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.nxv4i16.nxv4i32(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i16_nxv4i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1723,14 +1725,14 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vnclipu.mask.nxv4i16.nxv4i32(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1741,13 +1743,13 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.nxv8i16.nxv8i32(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i16_nxv8i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1757,14 +1759,14 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vnclipu.mask.nxv8i16.nxv8i32(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1775,13 +1777,13 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.nxv16i16.nxv16i32(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i32> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16(<vscale x 16 x i16> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv16i16_nxv16i32_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1791,14 +1793,14 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vnclipu.mask.nxv16i16.nxv16i32(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i32> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 16 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1808,13 +1810,13 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.nxv1i32.nxv1i64(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv1i32_nxv1i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -1824,14 +1826,14 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vnclipu.mask.nxv1i32.nxv1i64(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 1 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1842,13 +1844,13 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.nxv2i32.nxv2i64(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv2i32_nxv2i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -1858,14 +1860,14 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vnclipu.mask.nxv2i32.nxv2i64(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 2 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1876,13 +1878,13 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.nxv4i32.nxv4i64(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv4i32_nxv4i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -1892,14 +1894,14 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vnclipu.mask.nxv4i32.nxv4i64(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 4 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i64> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1910,13 +1912,13 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.nxv8i32.nxv8i64(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i64> %0,
-    i32 9,
-    i32 %1)
+    iXLen 9,
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
 
-define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vnclipu_mask_vi_nxv8i32_nxv8i64_i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -1926,9 +1928,9 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vnclipu.mask.nxv8i32.nxv8i64(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i64> %1,
-    i32 9,
+    iXLen 9,
     <vscale x 8 x i1> %2,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
deleted file mode 100644
index 645c5b09f7cc5..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv32.ll
+++ /dev/null
@@ -1,1904 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-
-declare i8 @llvm.vector.reduce.add.nxv1i8(<vscale x 1 x i8>)
-
-define signext i8 @vreduce_add_nxv1i8(<vscale x 1 x i8> %v) {
-; CHECK-LABEL: vreduce_add_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.add.nxv1i8(<vscale x 1 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.umax.nxv1i8(<vscale x 1 x i8>)
-
-define signext i8 @vreduce_umax_nxv1i8(<vscale x 1 x i8> %v) {
-; CHECK-LABEL: vreduce_umax_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.umax.nxv1i8(<vscale x 1 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.smax.nxv1i8(<vscale x 1 x i8>)
-
-define signext i8 @vreduce_smax_nxv1i8(<vscale x 1 x i8> %v) {
-; CHECK-LABEL: vreduce_smax_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, -128
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.smax.nxv1i8(<vscale x 1 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.umin.nxv1i8(<vscale x 1 x i8>)
-
-define signext i8 @vreduce_umin_nxv1i8(<vscale x 1 x i8> %v) {
-; CHECK-LABEL: vreduce_umin_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.umin.nxv1i8(<vscale x 1 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.smin.nxv1i8(<vscale x 1 x i8>)
-
-define signext i8 @vreduce_smin_nxv1i8(<vscale x 1 x i8> %v) {
-; CHECK-LABEL: vreduce_smin_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 127
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.smin.nxv1i8(<vscale x 1 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.and.nxv1i8(<vscale x 1 x i8>)
-
-define signext i8 @vreduce_and_nxv1i8(<vscale x 1 x i8> %v) {
-; CHECK-LABEL: vreduce_and_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.and.nxv1i8(<vscale x 1 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.or.nxv1i8(<vscale x 1 x i8>)
-
-define signext i8 @vreduce_or_nxv1i8(<vscale x 1 x i8> %v) {
-; CHECK-LABEL: vreduce_or_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.or.nxv1i8(<vscale x 1 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.xor.nxv1i8(<vscale x 1 x i8>)
-
-define signext i8 @vreduce_xor_nxv1i8(<vscale x 1 x i8> %v) {
-; CHECK-LABEL: vreduce_xor_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.xor.nxv1i8(<vscale x 1 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.add.nxv2i8(<vscale x 2 x i8>)
-
-define signext i8 @vreduce_add_nxv2i8(<vscale x 2 x i8> %v) {
-; CHECK-LABEL: vreduce_add_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.add.nxv2i8(<vscale x 2 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.umax.nxv2i8(<vscale x 2 x i8>)
-
-define signext i8 @vreduce_umax_nxv2i8(<vscale x 2 x i8> %v) {
-; CHECK-LABEL: vreduce_umax_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.umax.nxv2i8(<vscale x 2 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.smax.nxv2i8(<vscale x 2 x i8>)
-
-define signext i8 @vreduce_smax_nxv2i8(<vscale x 2 x i8> %v) {
-; CHECK-LABEL: vreduce_smax_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, -128
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.smax.nxv2i8(<vscale x 2 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.umin.nxv2i8(<vscale x 2 x i8>)
-
-define signext i8 @vreduce_umin_nxv2i8(<vscale x 2 x i8> %v) {
-; CHECK-LABEL: vreduce_umin_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.umin.nxv2i8(<vscale x 2 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.smin.nxv2i8(<vscale x 2 x i8>)
-
-define signext i8 @vreduce_smin_nxv2i8(<vscale x 2 x i8> %v) {
-; CHECK-LABEL: vreduce_smin_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 127
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.smin.nxv2i8(<vscale x 2 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.and.nxv2i8(<vscale x 2 x i8>)
-
-define signext i8 @vreduce_and_nxv2i8(<vscale x 2 x i8> %v) {
-; CHECK-LABEL: vreduce_and_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.and.nxv2i8(<vscale x 2 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.or.nxv2i8(<vscale x 2 x i8>)
-
-define signext i8 @vreduce_or_nxv2i8(<vscale x 2 x i8> %v) {
-; CHECK-LABEL: vreduce_or_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.or.nxv2i8(<vscale x 2 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.xor.nxv2i8(<vscale x 2 x i8>)
-
-define signext i8 @vreduce_xor_nxv2i8(<vscale x 2 x i8> %v) {
-; CHECK-LABEL: vreduce_xor_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.xor.nxv2i8(<vscale x 2 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.add.nxv4i8(<vscale x 4 x i8>)
-
-define signext i8 @vreduce_add_nxv4i8(<vscale x 4 x i8> %v) {
-; CHECK-LABEL: vreduce_add_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.add.nxv4i8(<vscale x 4 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.umax.nxv4i8(<vscale x 4 x i8>)
-
-define signext i8 @vreduce_umax_nxv4i8(<vscale x 4 x i8> %v) {
-; CHECK-LABEL: vreduce_umax_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.umax.nxv4i8(<vscale x 4 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.smax.nxv4i8(<vscale x 4 x i8>)
-
-define signext i8 @vreduce_smax_nxv4i8(<vscale x 4 x i8> %v) {
-; CHECK-LABEL: vreduce_smax_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, -128
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.smax.nxv4i8(<vscale x 4 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.umin.nxv4i8(<vscale x 4 x i8>)
-
-define signext i8 @vreduce_umin_nxv4i8(<vscale x 4 x i8> %v) {
-; CHECK-LABEL: vreduce_umin_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.umin.nxv4i8(<vscale x 4 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.smin.nxv4i8(<vscale x 4 x i8>)
-
-define signext i8 @vreduce_smin_nxv4i8(<vscale x 4 x i8> %v) {
-; CHECK-LABEL: vreduce_smin_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, 127
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.smin.nxv4i8(<vscale x 4 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.and.nxv4i8(<vscale x 4 x i8>)
-
-define signext i8 @vreduce_and_nxv4i8(<vscale x 4 x i8> %v) {
-; CHECK-LABEL: vreduce_and_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.and.nxv4i8(<vscale x 4 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.or.nxv4i8(<vscale x 4 x i8>)
-
-define signext i8 @vreduce_or_nxv4i8(<vscale x 4 x i8> %v) {
-; CHECK-LABEL: vreduce_or_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.or.nxv4i8(<vscale x 4 x i8> %v)
-  ret i8 %red
-}
-
-declare i8 @llvm.vector.reduce.xor.nxv4i8(<vscale x 4 x i8>)
-
-define signext i8 @vreduce_xor_nxv4i8(<vscale x 4 x i8> %v) {
-; CHECK-LABEL: vreduce_xor_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i8 @llvm.vector.reduce.xor.nxv4i8(<vscale x 4 x i8> %v)
-  ret i8 %red
-}
-
-declare i16 @llvm.vector.reduce.add.nxv1i16(<vscale x 1 x i16>)
-
-define signext i16 @vreduce_add_nxv1i16(<vscale x 1 x i16> %v) {
-; CHECK-LABEL: vreduce_add_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.add.nxv1i16(<vscale x 1 x i16> %v)
-  ret i16 %red
-}
-
-define signext i16 @vwreduce_add_nxv1i8(<vscale x 1 x i8> %v) {
-; CHECK-LABEL: vwreduce_add_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 1 x i8> %v to <vscale x 1 x i16>
-  %red = call i16 @llvm.vector.reduce.add.nxv1i16(<vscale x 1 x i16> %e)
-  ret i16 %red
-}
-
-define signext i16 @vwreduce_uadd_nxv1i8(<vscale x 1 x i8> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv1i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 1 x i8> %v to <vscale x 1 x i16>
-  %red = call i16 @llvm.vector.reduce.add.nxv1i16(<vscale x 1 x i16> %e)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.umax.nxv1i16(<vscale x 1 x i16>)
-
-define signext i16 @vreduce_umax_nxv1i16(<vscale x 1 x i16> %v) {
-; CHECK-LABEL: vreduce_umax_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.umax.nxv1i16(<vscale x 1 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.smax.nxv1i16(<vscale x 1 x i16>)
-
-define signext i16 @vreduce_smax_nxv1i16(<vscale x 1 x i16> %v) {
-; CHECK-LABEL: vreduce_smax_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 1048568
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.smax.nxv1i16(<vscale x 1 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.umin.nxv1i16(<vscale x 1 x i16>)
-
-define signext i16 @vreduce_umin_nxv1i16(<vscale x 1 x i16> %v) {
-; CHECK-LABEL: vreduce_umin_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.umin.nxv1i16(<vscale x 1 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.smin.nxv1i16(<vscale x 1 x i16>)
-
-define signext i16 @vreduce_smin_nxv1i16(<vscale x 1 x i16> %v) {
-; CHECK-LABEL: vreduce_smin_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.smin.nxv1i16(<vscale x 1 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.and.nxv1i16(<vscale x 1 x i16>)
-
-define signext i16 @vreduce_and_nxv1i16(<vscale x 1 x i16> %v) {
-; CHECK-LABEL: vreduce_and_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.and.nxv1i16(<vscale x 1 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.or.nxv1i16(<vscale x 1 x i16>)
-
-define signext i16 @vreduce_or_nxv1i16(<vscale x 1 x i16> %v) {
-; CHECK-LABEL: vreduce_or_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.or.nxv1i16(<vscale x 1 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.xor.nxv1i16(<vscale x 1 x i16>)
-
-define signext i16 @vreduce_xor_nxv1i16(<vscale x 1 x i16> %v) {
-; CHECK-LABEL: vreduce_xor_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.xor.nxv1i16(<vscale x 1 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.add.nxv2i16(<vscale x 2 x i16>)
-
-define signext i16 @vreduce_add_nxv2i16(<vscale x 2 x i16> %v) {
-; CHECK-LABEL: vreduce_add_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.add.nxv2i16(<vscale x 2 x i16> %v)
-  ret i16 %red
-}
-
-define signext i16 @vwreduce_add_nxv2i8(<vscale x 2 x i8> %v) {
-; CHECK-LABEL: vwreduce_add_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 2 x i8> %v to <vscale x 2 x i16>
-  %red = call i16 @llvm.vector.reduce.add.nxv2i16(<vscale x 2 x i16> %e)
-  ret i16 %red
-}
-
-define signext i16 @vwreduce_uadd_nxv2i8(<vscale x 2 x i8> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv2i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 2 x i8> %v to <vscale x 2 x i16>
-  %red = call i16 @llvm.vector.reduce.add.nxv2i16(<vscale x 2 x i16> %e)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.umax.nxv2i16(<vscale x 2 x i16>)
-
-define signext i16 @vreduce_umax_nxv2i16(<vscale x 2 x i16> %v) {
-; CHECK-LABEL: vreduce_umax_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.umax.nxv2i16(<vscale x 2 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.smax.nxv2i16(<vscale x 2 x i16>)
-
-define signext i16 @vreduce_smax_nxv2i16(<vscale x 2 x i16> %v) {
-; CHECK-LABEL: vreduce_smax_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 1048568
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.smax.nxv2i16(<vscale x 2 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.umin.nxv2i16(<vscale x 2 x i16>)
-
-define signext i16 @vreduce_umin_nxv2i16(<vscale x 2 x i16> %v) {
-; CHECK-LABEL: vreduce_umin_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.umin.nxv2i16(<vscale x 2 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.smin.nxv2i16(<vscale x 2 x i16>)
-
-define signext i16 @vreduce_smin_nxv2i16(<vscale x 2 x i16> %v) {
-; CHECK-LABEL: vreduce_smin_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.smin.nxv2i16(<vscale x 2 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.and.nxv2i16(<vscale x 2 x i16>)
-
-define signext i16 @vreduce_and_nxv2i16(<vscale x 2 x i16> %v) {
-; CHECK-LABEL: vreduce_and_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.and.nxv2i16(<vscale x 2 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.or.nxv2i16(<vscale x 2 x i16>)
-
-define signext i16 @vreduce_or_nxv2i16(<vscale x 2 x i16> %v) {
-; CHECK-LABEL: vreduce_or_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.or.nxv2i16(<vscale x 2 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.xor.nxv2i16(<vscale x 2 x i16>)
-
-define signext i16 @vreduce_xor_nxv2i16(<vscale x 2 x i16> %v) {
-; CHECK-LABEL: vreduce_xor_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.xor.nxv2i16(<vscale x 2 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.add.nxv4i16(<vscale x 4 x i16>)
-
-define signext i16 @vreduce_add_nxv4i16(<vscale x 4 x i16> %v) {
-; CHECK-LABEL: vreduce_add_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.add.nxv4i16(<vscale x 4 x i16> %v)
-  ret i16 %red
-}
-
-define signext i16 @vwreduce_add_nxv4i8(<vscale x 4 x i8> %v) {
-; CHECK-LABEL: vwreduce_add_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 4 x i8> %v to <vscale x 4 x i16>
-  %red = call i16 @llvm.vector.reduce.add.nxv4i16(<vscale x 4 x i16> %e)
-  ret i16 %red
-}
-
-define signext i16 @vwreduce_uadd_nxv4i8(<vscale x 4 x i8> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv4i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 4 x i8> %v to <vscale x 4 x i16>
-  %red = call i16 @llvm.vector.reduce.add.nxv4i16(<vscale x 4 x i16> %e)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.umax.nxv4i16(<vscale x 4 x i16>)
-
-define signext i16 @vreduce_umax_nxv4i16(<vscale x 4 x i16> %v) {
-; CHECK-LABEL: vreduce_umax_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.umax.nxv4i16(<vscale x 4 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.smax.nxv4i16(<vscale x 4 x i16>)
-
-define signext i16 @vreduce_smax_nxv4i16(<vscale x 4 x i16> %v) {
-; CHECK-LABEL: vreduce_smax_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 1048568
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.smax.nxv4i16(<vscale x 4 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.umin.nxv4i16(<vscale x 4 x i16>)
-
-define signext i16 @vreduce_umin_nxv4i16(<vscale x 4 x i16> %v) {
-; CHECK-LABEL: vreduce_umin_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.umin.nxv4i16(<vscale x 4 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.smin.nxv4i16(<vscale x 4 x i16>)
-
-define signext i16 @vreduce_smin_nxv4i16(<vscale x 4 x i16> %v) {
-; CHECK-LABEL: vreduce_smin_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.smin.nxv4i16(<vscale x 4 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.and.nxv4i16(<vscale x 4 x i16>)
-
-define signext i16 @vreduce_and_nxv4i16(<vscale x 4 x i16> %v) {
-; CHECK-LABEL: vreduce_and_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.and.nxv4i16(<vscale x 4 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.or.nxv4i16(<vscale x 4 x i16>)
-
-define signext i16 @vreduce_or_nxv4i16(<vscale x 4 x i16> %v) {
-; CHECK-LABEL: vreduce_or_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.or.nxv4i16(<vscale x 4 x i16> %v)
-  ret i16 %red
-}
-
-declare i16 @llvm.vector.reduce.xor.nxv4i16(<vscale x 4 x i16>)
-
-define signext i16 @vreduce_xor_nxv4i16(<vscale x 4 x i16> %v) {
-; CHECK-LABEL: vreduce_xor_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i16 @llvm.vector.reduce.xor.nxv4i16(<vscale x 4 x i16> %v)
-  ret i16 %red
-}
-
-declare i32 @llvm.vector.reduce.add.nxv1i32(<vscale x 1 x i32>)
-
-define i32 @vreduce_add_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vreduce_add_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.add.nxv1i32(<vscale x 1 x i32> %v)
-  ret i32 %red
-}
-
-define i32 @vwreduce_add_nxv1i16(<vscale x 1 x i16> %v) {
-; CHECK-LABEL: vwreduce_add_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 1 x i16> %v to <vscale x 1 x i32>
-  %red = call i32 @llvm.vector.reduce.add.nxv1i32(<vscale x 1 x i32> %e)
-  ret i32 %red
-}
-
-define i32 @vwreduce_uadd_nxv1i16(<vscale x 1 x i16> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = zext <vscale x 1 x i16> %v to <vscale x 1 x i32>
-  %red = call i32 @llvm.vector.reduce.add.nxv1i32(<vscale x 1 x i32> %e)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.umax.nxv1i32(<vscale x 1 x i32>)
-
-define i32 @vreduce_umax_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vreduce_umax_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.umax.nxv1i32(<vscale x 1 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.smax.nxv1i32(<vscale x 1 x i32>)
-
-define i32 @vreduce_smax_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vreduce_smax_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.smax.nxv1i32(<vscale x 1 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.umin.nxv1i32(<vscale x 1 x i32>)
-
-define i32 @vreduce_umin_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vreduce_umin_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.umin.nxv1i32(<vscale x 1 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.smin.nxv1i32(<vscale x 1 x i32>)
-
-define i32 @vreduce_smin_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vreduce_smin_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.smin.nxv1i32(<vscale x 1 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.and.nxv1i32(<vscale x 1 x i32>)
-
-define i32 @vreduce_and_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vreduce_and_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.and.nxv1i32(<vscale x 1 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.or.nxv1i32(<vscale x 1 x i32>)
-
-define i32 @vreduce_or_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vreduce_or_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.or.nxv1i32(<vscale x 1 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.xor.nxv1i32(<vscale x 1 x i32>)
-
-define i32 @vreduce_xor_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vreduce_xor_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.xor.nxv1i32(<vscale x 1 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32>)
-
-define i32 @vreduce_add_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vreduce_add_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> %v)
-  ret i32 %red
-}
-
-define i32 @vwreduce_add_nxv2i16(<vscale x 2 x i16> %v) {
-; CHECK-LABEL: vwreduce_add_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 2 x i16> %v to <vscale x 2 x i32>
-  %red = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> %e)
-  ret i32 %red
-}
-
-define i32 @vwreduce_uadd_nxv2i16(<vscale x 2 x i16> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = zext <vscale x 2 x i16> %v to <vscale x 2 x i32>
-  %red = call i32 @llvm.vector.reduce.add.nxv2i32(<vscale x 2 x i32> %e)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.umax.nxv2i32(<vscale x 2 x i32>)
-
-define i32 @vreduce_umax_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vreduce_umax_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.umax.nxv2i32(<vscale x 2 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.smax.nxv2i32(<vscale x 2 x i32>)
-
-define i32 @vreduce_smax_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vreduce_smax_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.smax.nxv2i32(<vscale x 2 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.umin.nxv2i32(<vscale x 2 x i32>)
-
-define i32 @vreduce_umin_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vreduce_umin_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.umin.nxv2i32(<vscale x 2 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.smin.nxv2i32(<vscale x 2 x i32>)
-
-define i32 @vreduce_smin_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vreduce_smin_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.smin.nxv2i32(<vscale x 2 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.and.nxv2i32(<vscale x 2 x i32>)
-
-define i32 @vreduce_and_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vreduce_and_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.and.nxv2i32(<vscale x 2 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32>)
-
-define i32 @vreduce_or_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vreduce_or_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.or.nxv2i32(<vscale x 2 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.xor.nxv2i32(<vscale x 2 x i32>)
-
-define i32 @vreduce_xor_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vreduce_xor_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.xor.nxv2i32(<vscale x 2 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32>)
-
-define i32 @vreduce_add_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vreduce_add_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %v)
-  ret i32 %red
-}
-
-define i32 @vwreduce_add_nxv4i16(<vscale x 4 x i16> %v) {
-; CHECK-LABEL: vwreduce_add_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 4 x i16> %v to <vscale x 4 x i32>
-  %red = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %e)
-  ret i32 %red
-}
-
-define i32 @vwreduce_uadd_nxv4i16(<vscale x 4 x i16> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %e = zext <vscale x 4 x i16> %v to <vscale x 4 x i32>
-  %red = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> %e)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32>)
-
-define i32 @vreduce_umax_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vreduce_umax_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.umax.nxv4i32(<vscale x 4 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32>)
-
-define i32 @vreduce_smax_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vreduce_smax_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.smax.nxv4i32(<vscale x 4 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32>)
-
-define i32 @vreduce_umin_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vreduce_umin_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v10, -1
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.umin.nxv4i32(<vscale x 4 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32>)
-
-define i32 @vreduce_smin_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vreduce_smin_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32>)
-
-define i32 @vreduce_and_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vreduce_and_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v10, -1
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.and.nxv4i32(<vscale x 4 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32>)
-
-define i32 @vreduce_or_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vreduce_or_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.or.nxv4i32(<vscale x 4 x i32> %v)
-  ret i32 %red
-}
-
-declare i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32>)
-
-define i32 @vreduce_xor_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vreduce_xor_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
-  %red = call i32 @llvm.vector.reduce.xor.nxv4i32(<vscale x 4 x i32> %v)
-  ret i32 %red
-}
-
-declare i64 @llvm.vector.reduce.add.nxv1i64(<vscale x 1 x i64>)
-
-define i64 @vreduce_add_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_add_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.add.nxv1i64(<vscale x 1 x i64> %v)
-  ret i64 %red
-}
-
-define i64 @vwreduce_add_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vwreduce_add_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 1 x i32> %v to <vscale x 1 x i64>
-  %red = call i64 @llvm.vector.reduce.add.nxv1i64(<vscale x 1 x i64> %e)
-  ret i64 %red
-}
-
-define i64 @vwreduce_uadd_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %e = zext <vscale x 1 x i32> %v to <vscale x 1 x i64>
-  %red = call i64 @llvm.vector.reduce.add.nxv1i64(<vscale x 1 x i64> %e)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.umax.nxv1i64(<vscale x 1 x i64>)
-
-define i64 @vreduce_umax_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_umax_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.umax.nxv1i64(<vscale x 1 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.smax.nxv1i64(<vscale x 1 x i64>)
-
-define i64 @vreduce_smax_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_smax_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    sw zero, 8(sp)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.smax.nxv1i64(<vscale x 1 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.umin.nxv1i64(<vscale x 1 x i64>)
-
-define i64 @vreduce_umin_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_umin_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.umin.nxv1i64(<vscale x 1 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.smin.nxv1i64(<vscale x 1 x i64>)
-
-define i64 @vreduce_smin_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_smin_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v9, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.smin.nxv1i64(<vscale x 1 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.and.nxv1i64(<vscale x 1 x i64>)
-
-define i64 @vreduce_and_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_and_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.and.nxv1i64(<vscale x 1 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.or.nxv1i64(<vscale x 1 x i64>)
-
-define i64 @vreduce_or_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_or_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.or.nxv1i64(<vscale x 1 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.xor.nxv1i64(<vscale x 1 x i64>)
-
-define i64 @vreduce_xor_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_xor_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.xor.nxv1i64(<vscale x 1 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64>)
-
-define i64 @vreduce_add_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_add_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %v)
-  ret i64 %red
-}
-
-define i64 @vwreduce_add_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vwreduce_add_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 2 x i32> %v to <vscale x 2 x i64>
-  %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %e)
-  ret i64 %red
-}
-
-define i64 @vwreduce_uadd_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %e = zext <vscale x 2 x i32> %v to <vscale x 2 x i64>
-  %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %e)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64>)
-
-define i64 @vreduce_umax_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_umax_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64>)
-
-define i64 @vreduce_smax_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_smax_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    sw zero, 8(sp)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64>)
-
-define i64 @vreduce_umin_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_umin_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v10, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64>)
-
-define i64 @vreduce_smin_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_smin_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v10, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64>)
-
-define i64 @vreduce_and_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_and_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v10, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64>)
-
-define i64 @vreduce_or_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_or_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64>)
-
-define i64 @vreduce_xor_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_xor_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64>)
-
-define i64 @vreduce_add_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_add_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v12, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %v)
-  ret i64 %red
-}
-
-define i64 @vwreduce_add_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vwreduce_add_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v10
-; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %e = sext <vscale x 4 x i32> %v to <vscale x 4 x i64>
-  %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %e)
-  ret i64 %red
-}
-
-define i64 @vwreduce_uadd_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vwredsumu.vs v8, v8, v10
-; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %e = zext <vscale x 4 x i32> %v to <vscale x 4 x i64>
-  %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %e)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.umax.nxv4i64(<vscale x 4 x i64>)
-
-define i64 @vreduce_umax_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_umax_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v12, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.umax.nxv4i64(<vscale x 4 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.smax.nxv4i64(<vscale x 4 x i64>)
-
-define i64 @vreduce_smax_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_smax_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    sw zero, 8(sp)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.smax.nxv4i64(<vscale x 4 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.umin.nxv4i64(<vscale x 4 x i64>)
-
-define i64 @vreduce_umin_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_umin_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v12, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.umin.nxv4i64(<vscale x 4 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.smin.nxv4i64(<vscale x 4 x i64>)
-
-define i64 @vreduce_smin_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_smin_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    sw a0, 8(sp)
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    addi a0, a0, -1
-; CHECK-NEXT:    sw a0, 12(sp)
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    addi a0, sp, 8
-; CHECK-NEXT:    vlse64.v v12, (a0), zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.smin.nxv4i64(<vscale x 4 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.and.nxv4i64(<vscale x 4 x i64>)
-
-define i64 @vreduce_and_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_and_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v12, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.and.nxv4i64(<vscale x 4 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.or.nxv4i64(<vscale x 4 x i64>)
-
-define i64 @vreduce_or_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_or_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v12, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.or.nxv4i64(<vscale x 4 x i64> %v)
-  ret i64 %red
-}
-
-declare i64 @llvm.vector.reduce.xor.nxv4i64(<vscale x 4 x i64>)
-
-define i64 @vreduce_xor_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_xor_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v12, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    li a1, 32
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vsrl.vx v8, v8, a1
-; CHECK-NEXT:    vmv.x.s a1, v8
-; CHECK-NEXT:    ret
-  %red = call i64 @llvm.vector.reduce.xor.nxv4i64(<vscale x 4 x i64> %v)
-  ret i64 %red
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll
similarity index 63%
rename from llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll
index 439c9d4bb58d5..04b28325aab9e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll
@@ -1,5 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
 
 declare i8 @llvm.vector.reduce.add.nxv1i8(<vscale x 1 x i8>)
 
@@ -461,16 +464,27 @@ define signext i16 @vreduce_umin_nxv1i16(<vscale x 1 x i16> %v) {
 declare i16 @llvm.vector.reduce.smin.nxv1i16(<vscale x 1 x i16>)
 
 define signext i16 @vreduce_smin_nxv1i16(<vscale x 1 x i16> %v) {
-; CHECK-LABEL: vreduce_smin_nxv1i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addiw a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smin_nxv1i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 8
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, a0
+; RV32-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; RV32-NEXT:    vredmin.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smin_nxv1i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 8
+; RV64-NEXT:    addiw a0, a0, -1
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
+; RV64-NEXT:    vredmin.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i16 @llvm.vector.reduce.smin.nxv1i16(<vscale x 1 x i16> %v)
   ret i16 %red
 }
@@ -614,16 +628,27 @@ define signext i16 @vreduce_umin_nxv2i16(<vscale x 2 x i16> %v) {
 declare i16 @llvm.vector.reduce.smin.nxv2i16(<vscale x 2 x i16>)
 
 define signext i16 @vreduce_smin_nxv2i16(<vscale x 2 x i16> %v) {
-; CHECK-LABEL: vreduce_smin_nxv2i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addiw a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smin_nxv2i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 8
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, a0
+; RV32-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; RV32-NEXT:    vredmin.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smin_nxv2i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 8
+; RV64-NEXT:    addiw a0, a0, -1
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; RV64-NEXT:    vredmin.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i16 @llvm.vector.reduce.smin.nxv2i16(<vscale x 2 x i16> %v)
   ret i16 %red
 }
@@ -767,16 +792,27 @@ define signext i16 @vreduce_umin_nxv4i16(<vscale x 4 x i16> %v) {
 declare i16 @llvm.vector.reduce.smin.nxv4i16(<vscale x 4 x i16>)
 
 define signext i16 @vreduce_smin_nxv4i16(<vscale x 4 x i16> %v) {
-; CHECK-LABEL: vreduce_smin_nxv4i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 8
-; CHECK-NEXT:    addiw a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smin_nxv4i16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 8
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, a0
+; RV32-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; RV32-NEXT:    vredmin.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smin_nxv4i16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 8
+; RV64-NEXT:    addiw a0, a0, -1
+; RV64-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
+; RV64-NEXT:    vredmin.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i16 @llvm.vector.reduce.smin.nxv4i16(<vscale x 4 x i16> %v)
   ret i16 %red
 }
@@ -920,16 +956,27 @@ define signext i32 @vreduce_umin_nxv1i32(<vscale x 1 x i32> %v) {
 declare i32 @llvm.vector.reduce.smin.nxv1i32(<vscale x 1 x i32>)
 
 define signext i32 @vreduce_smin_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vreduce_smin_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    addiw a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smin_nxv1i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 524288
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, a0
+; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vredmin.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smin_nxv1i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 524288
+; RV64-NEXT:    addiw a0, a0, -1
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vredmin.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i32 @llvm.vector.reduce.smin.nxv1i32(<vscale x 1 x i32> %v)
   ret i32 %red
 }
@@ -1073,16 +1120,27 @@ define signext i32 @vreduce_umin_nxv2i32(<vscale x 2 x i32> %v) {
 declare i32 @llvm.vector.reduce.smin.nxv2i32(<vscale x 2 x i32>)
 
 define signext i32 @vreduce_smin_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vreduce_smin_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    addiw a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smin_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 524288
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, a0
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV32-NEXT:    vredmin.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smin_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 524288
+; RV64-NEXT:    addiw a0, a0, -1
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV64-NEXT:    vredmin.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i32 @llvm.vector.reduce.smin.nxv2i32(<vscale x 2 x i32> %v)
   ret i32 %red
 }
@@ -1226,16 +1284,27 @@ define signext i32 @vreduce_umin_nxv4i32(<vscale x 4 x i32> %v) {
 declare i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32>)
 
 define signext i32 @vreduce_smin_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vreduce_smin_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    lui a0, 524288
-; CHECK-NEXT:    addiw a0, a0, -1
-; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smin_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lui a0, 524288
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v10, a0
+; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV32-NEXT:    vredmin.vs v8, v8, v10
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smin_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lui a0, 524288
+; RV64-NEXT:    addiw a0, a0, -1
+; RV64-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v10, a0
+; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV64-NEXT:    vredmin.vs v8, v8, v10
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i32 @llvm.vector.reduce.smin.nxv4i32(<vscale x 4 x i32> %v)
   ret i32 %red
 }
@@ -1288,43 +1357,84 @@ define signext i32 @vreduce_xor_nxv4i32(<vscale x 4 x i32> %v) {
 declare i64 @llvm.vector.reduce.add.nxv1i64(<vscale x 1 x i64>)
 
 define i64 @vreduce_add_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_add_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_add_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vredsum.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_add_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT:    vredsum.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.add.nxv1i64(<vscale x 1 x i64> %v)
   ret i64 %red
 }
 
 define i64 @vwreduce_add_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vwreduce_add_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vwreduce_add_nxv1i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, zero
+; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vwredsum.vs v8, v8, v9
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwreduce_add_nxv1i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, zero
+; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vwredsum.vs v8, v8, v9
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %e = sext <vscale x 1 x i32> %v to <vscale x 1 x i64>
   %red = call i64 @llvm.vector.reduce.add.nxv1i64(<vscale x 1 x i64> %e)
   ret i64 %red
 }
 
 define i64 @vwreduce_uadd_nxv1i32(<vscale x 1 x i32> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv1i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vwreduce_uadd_nxv1i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, zero
+; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; RV32-NEXT:    vwredsumu.vs v8, v8, v9
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwreduce_uadd_nxv1i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, zero
+; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; RV64-NEXT:    vwredsumu.vs v8, v8, v9
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %e = zext <vscale x 1 x i32> %v to <vscale x 1 x i64>
   %red = call i64 @llvm.vector.reduce.add.nxv1i64(<vscale x 1 x i64> %e)
   ret i64 %red
@@ -1333,14 +1443,27 @@ define i64 @vwreduce_uadd_nxv1i32(<vscale x 1 x i32> %v) {
 declare i64 @llvm.vector.reduce.umax.nxv1i64(<vscale x 1 x i64>)
 
 define i64 @vreduce_umax_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_umax_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_umax_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vredmaxu.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_umax_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT:    vredmaxu.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umax.nxv1i64(<vscale x 1 x i64> %v)
   ret i64 %red
 }
@@ -1348,16 +1471,36 @@ define i64 @vreduce_umax_nxv1i64(<vscale x 1 x i64> %v) {
 declare i64 @llvm.vector.reduce.smax.nxv1i64(<vscale x 1 x i64>)
 
 define i64 @vreduce_smax_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_smax_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    slli a0, a0, 63
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smax_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lui a0, 524288
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vredmax.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smax_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT:    vredmax.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smax.nxv1i64(<vscale x 1 x i64> %v)
   ret i64 %red
 }
@@ -1365,14 +1508,27 @@ define i64 @vreduce_smax_nxv1i64(<vscale x 1 x i64> %v) {
 declare i64 @llvm.vector.reduce.umin.nxv1i64(<vscale x 1 x i64>)
 
 define i64 @vreduce_umin_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_umin_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_umin_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.v.i v9, -1
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vredminu.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_umin_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.v.i v9, -1
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT:    vredminu.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umin.nxv1i64(<vscale x 1 x i64> %v)
   ret i64 %red
 }
@@ -1380,16 +1536,38 @@ define i64 @vreduce_umin_nxv1i64(<vscale x 1 x i64> %v) {
 declare i64 @llvm.vector.reduce.smin.nxv1i64(<vscale x 1 x i64>)
 
 define i64 @vreduce_smin_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_smin_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smin_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    li a0, -1
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    lui a0, 524288
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vredmin.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smin_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    srli a0, a0, 1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT:    vredmin.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smin.nxv1i64(<vscale x 1 x i64> %v)
   ret i64 %red
 }
@@ -1397,14 +1575,27 @@ define i64 @vreduce_smin_nxv1i64(<vscale x 1 x i64> %v) {
 declare i64 @llvm.vector.reduce.and.nxv1i64(<vscale x 1 x i64>)
 
 define i64 @vreduce_and_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_and_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v9, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_and_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.v.i v9, -1
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vredand.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_and_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.v.i v9, -1
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT:    vredand.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.and.nxv1i64(<vscale x 1 x i64> %v)
   ret i64 %red
 }
@@ -1412,14 +1603,27 @@ define i64 @vreduce_and_nxv1i64(<vscale x 1 x i64> %v) {
 declare i64 @llvm.vector.reduce.or.nxv1i64(<vscale x 1 x i64>)
 
 define i64 @vreduce_or_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_or_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_or_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vredor.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_or_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT:    vredor.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.or.nxv1i64(<vscale x 1 x i64> %v)
   ret i64 %red
 }
@@ -1427,14 +1631,27 @@ define i64 @vreduce_or_nxv1i64(<vscale x 1 x i64> %v) {
 declare i64 @llvm.vector.reduce.xor.nxv1i64(<vscale x 1 x i64>)
 
 define i64 @vreduce_xor_nxv1i64(<vscale x 1 x i64> %v) {
-; CHECK-LABEL: vreduce_xor_nxv1i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v9
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_xor_nxv1i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV32-NEXT:    vredxor.vs v8, v8, v9
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_xor_nxv1i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
+; RV64-NEXT:    vredxor.vs v8, v8, v9
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.xor.nxv1i64(<vscale x 1 x i64> %v)
   ret i64 %red
 }
@@ -1442,43 +1659,84 @@ define i64 @vreduce_xor_nxv1i64(<vscale x 1 x i64> %v) {
 declare i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64>)
 
 define i64 @vreduce_add_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_add_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_add_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v10, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vredsum.vs v8, v8, v10
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_add_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v10, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vredsum.vs v8, v8, v10
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %v)
   ret i64 %red
 }
 
 define i64 @vwreduce_add_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vwreduce_add_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vwreduce_add_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, zero
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV32-NEXT:    vwredsum.vs v8, v8, v9
+; RV32-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwreduce_add_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, zero
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV64-NEXT:    vwredsum.vs v8, v8, v9
+; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %e = sext <vscale x 2 x i32> %v to <vscale x 2 x i64>
   %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %e)
   ret i64 %red
 }
 
 define i64 @vwreduce_uadd_nxv2i32(<vscale x 2 x i32> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv2i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v9, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vwredsumu.vs v8, v8, v9
-; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vwreduce_uadd_nxv2i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v9, zero
+; RV32-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV32-NEXT:    vwredsumu.vs v8, v8, v9
+; RV32-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwreduce_uadd_nxv2i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v9, zero
+; RV64-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; RV64-NEXT:    vwredsumu.vs v8, v8, v9
+; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %e = zext <vscale x 2 x i32> %v to <vscale x 2 x i64>
   %red = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> %e)
   ret i64 %red
@@ -1487,14 +1745,27 @@ define i64 @vwreduce_uadd_nxv2i32(<vscale x 2 x i32> %v) {
 declare i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64>)
 
 define i64 @vreduce_umax_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_umax_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_umax_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v10, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vredmaxu.vs v8, v8, v10
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_umax_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v10, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vredmaxu.vs v8, v8, v10
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umax.nxv2i64(<vscale x 2 x i64> %v)
   ret i64 %red
 }
@@ -1502,16 +1773,36 @@ define i64 @vreduce_umax_nxv2i64(<vscale x 2 x i64> %v) {
 declare i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64>)
 
 define i64 @vreduce_smax_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_smax_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    slli a0, a0, 63
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smax_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lui a0, 524288
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vredmax.vs v8, v8, v10
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smax_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v10, a0
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vredmax.vs v8, v8, v10
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smax.nxv2i64(<vscale x 2 x i64> %v)
   ret i64 %red
 }
@@ -1519,14 +1810,27 @@ define i64 @vreduce_smax_nxv2i64(<vscale x 2 x i64> %v) {
 declare i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64>)
 
 define i64 @vreduce_umin_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_umin_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v10, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_umin_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.v.i v10, -1
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vredminu.vs v8, v8, v10
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_umin_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.v.i v10, -1
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vredminu.vs v8, v8, v10
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umin.nxv2i64(<vscale x 2 x i64> %v)
   ret i64 %red
 }
@@ -1534,16 +1838,38 @@ define i64 @vreduce_umin_nxv2i64(<vscale x 2 x i64> %v) {
 declare i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64>)
 
 define i64 @vreduce_smin_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_smin_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smin_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    li a0, -1
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    lui a0, 524288
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vredmin.vs v8, v8, v10
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smin_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    srli a0, a0, 1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v10, a0
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vredmin.vs v8, v8, v10
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smin.nxv2i64(<vscale x 2 x i64> %v)
   ret i64 %red
 }
@@ -1551,14 +1877,27 @@ define i64 @vreduce_smin_nxv2i64(<vscale x 2 x i64> %v) {
 declare i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64>)
 
 define i64 @vreduce_and_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_and_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v10, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_and_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.v.i v10, -1
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vredand.vs v8, v8, v10
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_and_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.v.i v10, -1
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vredand.vs v8, v8, v10
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.and.nxv2i64(<vscale x 2 x i64> %v)
   ret i64 %red
 }
@@ -1566,14 +1905,27 @@ define i64 @vreduce_and_nxv2i64(<vscale x 2 x i64> %v) {
 declare i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64>)
 
 define i64 @vreduce_or_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_or_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_or_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v10, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vredor.vs v8, v8, v10
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_or_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v10, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vredor.vs v8, v8, v10
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.or.nxv2i64(<vscale x 2 x i64> %v)
   ret i64 %red
 }
@@ -1581,14 +1933,27 @@ define i64 @vreduce_or_nxv2i64(<vscale x 2 x i64> %v) {
 declare i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64>)
 
 define i64 @vreduce_xor_nxv2i64(<vscale x 2 x i64> %v) {
-; CHECK-LABEL: vreduce_xor_nxv2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v10
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_xor_nxv2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v10, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV32-NEXT:    vredxor.vs v8, v8, v10
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_xor_nxv2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v10, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; RV64-NEXT:    vredxor.vs v8, v8, v10
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.xor.nxv2i64(<vscale x 2 x i64> %v)
   ret i64 %red
 }
@@ -1596,43 +1961,84 @@ define i64 @vreduce_xor_nxv2i64(<vscale x 2 x i64> %v) {
 declare i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64>)
 
 define i64 @vreduce_add_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_add_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v12, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredsum.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_add_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v12, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT:    vredsum.vs v8, v8, v12
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_add_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v12, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT:    vredsum.vs v8, v8, v12
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %v)
   ret i64 %red
 }
 
 define i64 @vwreduce_add_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vwreduce_add_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vwredsum.vs v8, v8, v10
-; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vwreduce_add_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v10, zero
+; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV32-NEXT:    vwredsum.vs v8, v8, v10
+; RV32-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwreduce_add_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v10, zero
+; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV64-NEXT:    vwredsum.vs v8, v8, v10
+; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %e = sext <vscale x 4 x i32> %v to <vscale x 4 x i64>
   %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %e)
   ret i64 %red
 }
 
 define i64 @vwreduce_uadd_nxv4i32(<vscale x 4 x i32> %v) {
-; CHECK-LABEL: vwreduce_uadd_nxv4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v10, zero
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT:    vwredsumu.vs v8, v8, v10
-; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vwreduce_uadd_nxv4i32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v10, zero
+; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV32-NEXT:    vwredsumu.vs v8, v8, v10
+; RV32-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vwreduce_uadd_nxv4i32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v10, zero
+; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV64-NEXT:    vwredsumu.vs v8, v8, v10
+; RV64-NEXT:    vsetivli zero, 0, e64, m1, ta, mu
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %e = zext <vscale x 4 x i32> %v to <vscale x 4 x i64>
   %red = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> %e)
   ret i64 %red
@@ -1641,14 +2047,27 @@ define i64 @vwreduce_uadd_nxv4i32(<vscale x 4 x i32> %v) {
 declare i64 @llvm.vector.reduce.umax.nxv4i64(<vscale x 4 x i64>)
 
 define i64 @vreduce_umax_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_umax_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v12, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredmaxu.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_umax_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v12, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT:    vredmaxu.vs v8, v8, v12
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_umax_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v12, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT:    vredmaxu.vs v8, v8, v12
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umax.nxv4i64(<vscale x 4 x i64> %v)
   ret i64 %red
 }
@@ -1656,16 +2075,36 @@ define i64 @vreduce_umax_nxv4i64(<vscale x 4 x i64> %v) {
 declare i64 @llvm.vector.reduce.smax.nxv4i64(<vscale x 4 x i64>)
 
 define i64 @vreduce_smax_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_smax_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    slli a0, a0, 63
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredmax.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smax_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    lui a0, 524288
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    sw zero, 8(sp)
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT:    vredmax.vs v8, v8, v12
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smax_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v12, a0
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT:    vredmax.vs v8, v8, v12
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smax.nxv4i64(<vscale x 4 x i64> %v)
   ret i64 %red
 }
@@ -1673,14 +2112,27 @@ define i64 @vreduce_smax_nxv4i64(<vscale x 4 x i64> %v) {
 declare i64 @llvm.vector.reduce.umin.nxv4i64(<vscale x 4 x i64>)
 
 define i64 @vreduce_umin_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_umin_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v12, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredminu.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_umin_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.v.i v12, -1
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT:    vredminu.vs v8, v8, v12
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_umin_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.v.i v12, -1
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT:    vredminu.vs v8, v8, v12
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.umin.nxv4i64(<vscale x 4 x i64> %v)
   ret i64 %red
 }
@@ -1688,16 +2140,38 @@ define i64 @vreduce_umin_nxv4i64(<vscale x 4 x i64> %v) {
 declare i64 @llvm.vector.reduce.smin.nxv4i64(<vscale x 4 x i64>)
 
 define i64 @vreduce_smin_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_smin_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    li a0, -1
-; CHECK-NEXT:    srli a0, a0, 1
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v12, a0
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredmin.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_smin_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    li a0, -1
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    lui a0, 524288
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    sw a0, 12(sp)
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT:    vredmin.vs v8, v8, v12
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_smin_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    srli a0, a0, 1
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v12, a0
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT:    vredmin.vs v8, v8, v12
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.smin.nxv4i64(<vscale x 4 x i64> %v)
   ret i64 %red
 }
@@ -1705,14 +2179,27 @@ define i64 @vreduce_smin_nxv4i64(<vscale x 4 x i64> %v) {
 declare i64 @llvm.vector.reduce.and.nxv4i64(<vscale x 4 x i64>)
 
 define i64 @vreduce_and_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_and_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.v.i v12, -1
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredand.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_and_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.v.i v12, -1
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT:    vredand.vs v8, v8, v12
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_and_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.v.i v12, -1
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT:    vredand.vs v8, v8, v12
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.and.nxv4i64(<vscale x 4 x i64> %v)
   ret i64 %red
 }
@@ -1720,14 +2207,27 @@ define i64 @vreduce_and_nxv4i64(<vscale x 4 x i64> %v) {
 declare i64 @llvm.vector.reduce.or.nxv4i64(<vscale x 4 x i64>)
 
 define i64 @vreduce_or_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_or_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v12, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredor.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_or_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v12, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT:    vredor.vs v8, v8, v12
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_or_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v12, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT:    vredor.vs v8, v8, v12
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.or.nxv4i64(<vscale x 4 x i64> %v)
   ret i64 %red
 }
@@ -1735,14 +2235,27 @@ define i64 @vreduce_or_nxv4i64(<vscale x 4 x i64> %v) {
 declare i64 @llvm.vector.reduce.xor.nxv4i64(<vscale x 4 x i64>)
 
 define i64 @vreduce_xor_nxv4i64(<vscale x 4 x i64> %v) {
-; CHECK-LABEL: vreduce_xor_nxv4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT:    vmv.s.x v12, zero
-; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT:    vredxor.vs v8, v8, v12
-; CHECK-NEXT:    vmv.x.s a0, v8
-; CHECK-NEXT:    ret
+; RV32-LABEL: vreduce_xor_nxv4i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vmv.s.x v12, zero
+; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV32-NEXT:    vredxor.vs v8, v8, v12
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vreduce_xor_nxv4i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT:    vmv.s.x v12, zero
+; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
+; RV64-NEXT:    vredxor.vs v8, v8, v12
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:    ret
   %red = call i64 @llvm.vector.reduce.xor.nxv4i64(<vscale x 4 x i64> %v)
   ret i64 %red
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
deleted file mode 100644
index 62c26f168d327..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vse-rv64.ll
+++ /dev/null
@@ -1,1557 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+zfh,+experimental-zvfh \
-; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare void @llvm.riscv.vse.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vse64.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vse32.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vse16.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i1> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  i64);
-
-define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    i64 %2)
-
-  ret void
-}
-
-declare void @llvm.riscv.vse.mask.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i1>,
-  i64);
-
-define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0), v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vse.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i1> %2,
-    i64 %3)
-
-  ret void
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vse.ll
similarity index 84%
rename from llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vse.ll
index 886d0fbb85bda..aaee4e2010236 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vse-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vse.ll
@@ -1,13 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -mattr=+zfh,+experimental-zvfh \
-; RUN:   -mattr=+d -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare void @llvm.riscv.vse.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -17,7 +18,7 @@ entry:
   call void @llvm.riscv.vse.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -26,9 +27,9 @@ declare void @llvm.riscv.vse.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -39,7 +40,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -47,9 +48,9 @@ entry:
 declare void @llvm.riscv.vse.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -59,7 +60,7 @@ entry:
   call void @llvm.riscv.vse.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -68,9 +69,9 @@ declare void @llvm.riscv.vse.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -81,7 +82,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -89,9 +90,9 @@ entry:
 declare void @llvm.riscv.vse.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -101,7 +102,7 @@ entry:
   call void @llvm.riscv.vse.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -110,9 +111,9 @@ declare void @llvm.riscv.vse.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -123,7 +124,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -131,9 +132,9 @@ entry:
 declare void @llvm.riscv.vse.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -143,7 +144,7 @@ entry:
   call void @llvm.riscv.vse.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -152,9 +153,9 @@ declare void @llvm.riscv.vse.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -165,7 +166,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -173,9 +174,9 @@ entry:
 declare void @llvm.riscv.vse.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -185,7 +186,7 @@ entry:
   call void @llvm.riscv.vse.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -194,9 +195,9 @@ declare void @llvm.riscv.vse.mask.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -207,7 +208,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -215,9 +216,9 @@ entry:
 declare void @llvm.riscv.vse.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -227,7 +228,7 @@ entry:
   call void @llvm.riscv.vse.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -236,9 +237,9 @@ declare void @llvm.riscv.vse.mask.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -249,7 +250,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -257,9 +258,9 @@ entry:
 declare void @llvm.riscv.vse.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -269,7 +270,7 @@ entry:
   call void @llvm.riscv.vse.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -278,9 +279,9 @@ declare void @llvm.riscv.vse.mask.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -291,7 +292,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -299,9 +300,9 @@ entry:
 declare void @llvm.riscv.vse.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -311,7 +312,7 @@ entry:
   call void @llvm.riscv.vse.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -320,9 +321,9 @@ declare void @llvm.riscv.vse.mask.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -333,7 +334,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -341,9 +342,9 @@ entry:
 declare void @llvm.riscv.vse.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -353,7 +354,7 @@ entry:
   call void @llvm.riscv.vse.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -362,9 +363,9 @@ declare void @llvm.riscv.vse.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -375,7 +376,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -383,9 +384,9 @@ entry:
 declare void @llvm.riscv.vse.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -395,7 +396,7 @@ entry:
   call void @llvm.riscv.vse.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -404,9 +405,9 @@ declare void @llvm.riscv.vse.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -417,7 +418,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -425,9 +426,9 @@ entry:
 declare void @llvm.riscv.vse.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -437,7 +438,7 @@ entry:
   call void @llvm.riscv.vse.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -446,9 +447,9 @@ declare void @llvm.riscv.vse.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -459,7 +460,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -467,9 +468,9 @@ entry:
 declare void @llvm.riscv.vse.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -479,7 +480,7 @@ entry:
   call void @llvm.riscv.vse.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -488,9 +489,9 @@ declare void @llvm.riscv.vse.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -501,7 +502,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -509,9 +510,9 @@ entry:
 declare void @llvm.riscv.vse.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -521,7 +522,7 @@ entry:
   call void @llvm.riscv.vse.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -530,9 +531,9 @@ declare void @llvm.riscv.vse.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -543,7 +544,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -551,9 +552,9 @@ entry:
 declare void @llvm.riscv.vse.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -563,7 +564,7 @@ entry:
   call void @llvm.riscv.vse.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -572,9 +573,9 @@ declare void @llvm.riscv.vse.mask.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -585,7 +586,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -593,9 +594,9 @@ entry:
 declare void @llvm.riscv.vse.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -605,7 +606,7 @@ entry:
   call void @llvm.riscv.vse.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -614,9 +615,9 @@ declare void @llvm.riscv.vse.mask.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -627,7 +628,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -635,9 +636,9 @@ entry:
 declare void @llvm.riscv.vse.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -647,7 +648,7 @@ entry:
   call void @llvm.riscv.vse.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -656,9 +657,9 @@ declare void @llvm.riscv.vse.mask.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -669,7 +670,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -677,9 +678,9 @@ entry:
 declare void @llvm.riscv.vse.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -689,7 +690,7 @@ entry:
   call void @llvm.riscv.vse.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -698,9 +699,9 @@ declare void @llvm.riscv.vse.mask.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -711,7 +712,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -719,9 +720,9 @@ entry:
 declare void @llvm.riscv.vse.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -731,7 +732,7 @@ entry:
   call void @llvm.riscv.vse.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -740,9 +741,9 @@ declare void @llvm.riscv.vse.mask.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -753,7 +754,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -761,9 +762,9 @@ entry:
 declare void @llvm.riscv.vse.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -773,7 +774,7 @@ entry:
   call void @llvm.riscv.vse.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -782,9 +783,9 @@ declare void @llvm.riscv.vse.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -795,7 +796,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -803,9 +804,9 @@ entry:
 declare void @llvm.riscv.vse.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -815,7 +816,7 @@ entry:
   call void @llvm.riscv.vse.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -824,9 +825,9 @@ declare void @llvm.riscv.vse.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -837,7 +838,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -845,9 +846,9 @@ entry:
 declare void @llvm.riscv.vse.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -857,7 +858,7 @@ entry:
   call void @llvm.riscv.vse.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -866,9 +867,9 @@ declare void @llvm.riscv.vse.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -879,7 +880,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -887,9 +888,9 @@ entry:
 declare void @llvm.riscv.vse.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -899,7 +900,7 @@ entry:
   call void @llvm.riscv.vse.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -908,9 +909,9 @@ declare void @llvm.riscv.vse.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -921,7 +922,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -929,9 +930,9 @@ entry:
 declare void @llvm.riscv.vse.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -941,7 +942,7 @@ entry:
   call void @llvm.riscv.vse.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -950,9 +951,9 @@ declare void @llvm.riscv.vse.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -963,7 +964,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -971,9 +972,9 @@ entry:
 declare void @llvm.riscv.vse.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -983,7 +984,7 @@ entry:
   call void @llvm.riscv.vse.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -992,9 +993,9 @@ declare void @llvm.riscv.vse.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1005,7 +1006,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1013,9 +1014,9 @@ entry:
 declare void @llvm.riscv.vse.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1025,7 +1026,7 @@ entry:
   call void @llvm.riscv.vse.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1034,9 +1035,9 @@ declare void @llvm.riscv.vse.mask.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1047,7 +1048,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1055,9 +1056,9 @@ entry:
 declare void @llvm.riscv.vse.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1067,7 +1068,7 @@ entry:
   call void @llvm.riscv.vse.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1076,9 +1077,9 @@ declare void @llvm.riscv.vse.mask.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1089,7 +1090,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1097,9 +1098,9 @@ entry:
 declare void @llvm.riscv.vse.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1109,7 +1110,7 @@ entry:
   call void @llvm.riscv.vse.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1118,9 +1119,9 @@ declare void @llvm.riscv.vse.mask.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1131,7 +1132,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1139,9 +1140,9 @@ entry:
 declare void @llvm.riscv.vse.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1151,7 +1152,7 @@ entry:
   call void @llvm.riscv.vse.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1160,9 +1161,9 @@ declare void @llvm.riscv.vse.mask.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1173,7 +1174,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1181,9 +1182,9 @@ entry:
 declare void @llvm.riscv.vse.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1193,7 +1194,7 @@ entry:
   call void @llvm.riscv.vse.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1202,9 +1203,9 @@ declare void @llvm.riscv.vse.mask.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1215,7 +1216,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1223,9 +1224,9 @@ entry:
 declare void @llvm.riscv.vse.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1235,7 +1236,7 @@ entry:
   call void @llvm.riscv.vse.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1244,9 +1245,9 @@ declare void @llvm.riscv.vse.mask.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1257,7 +1258,7 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x half>* %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1265,9 +1266,9 @@ entry:
 declare void @llvm.riscv.vse.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1277,7 +1278,7 @@ entry:
   call void @llvm.riscv.vse.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1286,9 +1287,9 @@ declare void @llvm.riscv.vse.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1299,7 +1300,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1307,9 +1308,9 @@ entry:
 declare void @llvm.riscv.vse.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1319,7 +1320,7 @@ entry:
   call void @llvm.riscv.vse.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1328,9 +1329,9 @@ declare void @llvm.riscv.vse.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1341,7 +1342,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1349,9 +1350,9 @@ entry:
 declare void @llvm.riscv.vse.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1361,7 +1362,7 @@ entry:
   call void @llvm.riscv.vse.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1370,9 +1371,9 @@ declare void @llvm.riscv.vse.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1383,7 +1384,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1391,9 +1392,9 @@ entry:
 declare void @llvm.riscv.vse.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1403,7 +1404,7 @@ entry:
   call void @llvm.riscv.vse.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1412,9 +1413,9 @@ declare void @llvm.riscv.vse.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1425,7 +1426,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1433,9 +1434,9 @@ entry:
 declare void @llvm.riscv.vse.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1445,7 +1446,7 @@ entry:
   call void @llvm.riscv.vse.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1454,9 +1455,9 @@ declare void @llvm.riscv.vse.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1467,7 +1468,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1475,9 +1476,9 @@ entry:
 declare void @llvm.riscv.vse.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1487,7 +1488,7 @@ entry:
   call void @llvm.riscv.vse.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1496,9 +1497,9 @@ declare void @llvm.riscv.vse.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1509,7 +1510,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1517,9 +1518,9 @@ entry:
 declare void @llvm.riscv.vse.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i32 %2) nounwind {
+define void @intrinsic_vse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, iXLen %2) nounwind {
 ; CHECK-LABEL: intrinsic_vse_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1529,7 +1530,7 @@ entry:
   call void @llvm.riscv.vse.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8>* %1,
-    i32 %2)
+    iXLen %2)
 
   ret void
 }
@@ -1538,9 +1539,9 @@ declare void @llvm.riscv.vse.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define void @intrinsic_vse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vse_mask_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -1551,7 +1552,7 @@ entry:
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8>* %1,
     <vscale x 64 x i1> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll
deleted file mode 100644
index f8f2325cdf628..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv32.ll
+++ /dev/null
@@ -1,1058 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vsext.vf8 v9, v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vsext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vsext.vf8 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vsext_vf8_nxv2i64(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vsext.vf8 v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vsext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vsext.vf8 v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vsext_vf8_nxv4i64(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vsext.vf8 v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vsext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vsext.vf8 v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vsext_vf8_nxv8i64(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vsext.vf8 v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vsext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vsext.vf8 v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i16>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vsext_vf4_nxv1i64(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf4_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vsext.vf4 v9, v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i16> %0,
-    i32 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vsext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i16>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vsext_vf4_nxv2i64(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf4_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vsext.vf4 v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i16> %0,
-    i32 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vsext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vsext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vsext_vf4_nxv4i64(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf4_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vsext.vf4 v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i16> %0,
-    i32 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vsext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vsext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i16>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vsext_vf4_nxv8i64(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf4_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vsext.vf4 v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i16> %0,
-    i32 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vsext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vsext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vsext_vf4_nxv1i32(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf4_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vsext.vf4 v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vsext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vsext_vf4_nxv2i32(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf4_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vsext.vf4 v9, v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vsext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vsext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vsext_vf4_nxv4i32(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf4_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsext.vf4 v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vsext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vsext_vf4_nxv8i32(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf4_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vsext.vf4 v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vsext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vsext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i8>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vsext_vf4_nxv16i32(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf4_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vsext.vf4 v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vsext_mask_vf4_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vsext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vsext_vf2_nxv1i32(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vsext.vf2 v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    i32 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vsext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vsext_vf2_nxv2i32(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vsext.vf2 v9, v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    i32 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vsext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vsext_vf2_nxv4i32(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    i32 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vsext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vsext_vf2_nxv8i32(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vsext.vf2 v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    i32 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vsext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vsext_vf2_nxv16i32(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf2_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vsext.vf2 v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    i32 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i32> @intrinsic_vsext_mask_vf2_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vsext_vf2_nxv1i16(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vsext.vf2 v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i32,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vsext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vsext_vf2_nxv2i16(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vsext.vf2 v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i32,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vsext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vsext_vf2_nxv4i16(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vsext.vf2 v9, v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i32,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vsext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vsext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vsext_vf2_nxv8i16(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vsext.vf2 v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i32,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vsext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vsext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vsext_vf2_nxv16i16(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf2_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vsext.vf2 v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i32,
-  i32);
-
-define <vscale x 16 x i16> @intrinsic_vsext_mask_vf2_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vsext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vsext_vf2_nxv32i16(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsext_vf2_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vsext.vf2 v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    i32 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i32,
-  i32);
-
-define <vscale x 32 x i16> @intrinsic_vsext_mask_vf2_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vsext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %0,
-    i32 %3, i32 1)
-
-  ret <vscale x 32 x i16> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsext.ll
similarity index 90%
rename from llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsext.ll
index a15d195f6c55b..705f479d7c6e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
 declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -17,7 +19,7 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -40,7 +42,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -48,9 +50,9 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsext_vf8_nxv2i64(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsext_vf8_nxv2i64(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -70,10 +72,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -84,7 +86,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -92,9 +94,9 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsext_vf8_nxv4i64(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsext_vf8_nxv4i64(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -105,7 +107,7 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -114,10 +116,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -128,7 +130,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -136,9 +138,9 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsext_vf8_nxv8i64(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsext_vf8_nxv8i64(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -149,7 +151,7 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -158,10 +160,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -172,7 +174,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -180,9 +182,9 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
   <vscale x 1 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsext_vf4_nxv1i64(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsext_vf4_nxv1i64(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf4_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -193,7 +195,7 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i16> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -202,10 +204,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -216,7 +218,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -224,9 +226,9 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
   <vscale x 2 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsext_vf4_nxv2i64(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsext_vf4_nxv2i64(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf4_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -237,7 +239,7 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i16> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -246,10 +248,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -260,7 +262,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -268,9 +270,9 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsext_vf4_nxv4i64(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsext_vf4_nxv4i64(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf4_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -281,7 +283,7 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i16> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -290,10 +292,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -304,7 +306,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -312,9 +314,9 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
   <vscale x 8 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsext_vf4_nxv8i64(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsext_vf4_nxv8i64(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf4_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -325,7 +327,7 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i16> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -334,10 +336,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -348,7 +350,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -356,9 +358,9 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsext_vf4_nxv1i32(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsext_vf4_nxv1i32(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf4_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -369,7 +371,7 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -378,10 +380,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -392,7 +394,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -400,9 +402,9 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsext_vf4_nxv2i32(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsext_vf4_nxv2i32(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf4_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -413,7 +415,7 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -422,10 +424,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -436,7 +438,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -444,9 +446,9 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsext_vf4_nxv4i32(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsext_vf4_nxv4i32(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf4_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -457,7 +459,7 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -466,10 +468,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -480,7 +482,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -488,9 +490,9 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsext_vf4_nxv8i32(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsext_vf4_nxv8i32(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf4_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -501,7 +503,7 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -510,10 +512,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -532,9 +534,9 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
   <vscale x 16 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsext_vf4_nxv16i32(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsext_vf4_nxv16i32(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf4_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -545,7 +547,7 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -554,10 +556,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsext_mask_vf4_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsext_mask_vf4_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf4_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -568,7 +570,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsext_vf2_nxv1i64(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsext_vf2_nxv1i64(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -589,7 +591,7 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -598,10 +600,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vsext.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vsext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -612,7 +614,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -620,9 +622,9 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsext_vf2_nxv2i64(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsext_vf2_nxv2i64(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -633,7 +635,7 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -642,10 +644,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vsext.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vsext_mask_vf2_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsext_mask_vf2_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -664,9 +666,9 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsext_vf2_nxv4i64(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsext_vf2_nxv4i64(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -677,7 +679,7 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -686,10 +688,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vsext.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vsext_mask_vf2_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsext_mask_vf2_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -700,7 +702,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -708,9 +710,9 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsext_vf2_nxv8i64(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsext_vf2_nxv8i64(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -721,7 +723,7 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -730,10 +732,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vsext.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vsext_mask_vf2_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsext_mask_vf2_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -744,7 +746,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -752,9 +754,9 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsext_vf2_nxv1i32(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsext_vf2_nxv1i32(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -765,7 +767,7 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -774,10 +776,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vsext.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vsext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -788,7 +790,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -796,9 +798,9 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsext_vf2_nxv2i32(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsext_vf2_nxv2i32(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -809,7 +811,7 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -818,10 +820,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vsext.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vsext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -832,7 +834,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -840,9 +842,9 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsext_vf2_nxv4i32(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsext_vf2_nxv4i32(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -853,7 +855,7 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -862,10 +864,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vsext.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vsext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -876,7 +878,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -884,9 +886,9 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsext_vf2_nxv8i32(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsext_vf2_nxv8i32(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -897,7 +899,7 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -906,10 +908,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vsext.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vsext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -920,7 +922,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -928,9 +930,9 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsext_vf2_nxv16i32(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsext_vf2_nxv16i32(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -941,7 +943,7 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -950,10 +952,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vsext.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vsext_mask_vf2_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsext_mask_vf2_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -964,7 +966,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -972,9 +974,9 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsext_vf2_nxv1i16(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsext_vf2_nxv1i16(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -985,7 +987,7 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -994,10 +996,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vsext.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vsext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1016,9 +1018,9 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsext_vf2_nxv2i16(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsext_vf2_nxv2i16(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1029,7 +1031,7 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1038,10 +1040,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vsext.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vsext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1052,7 +1054,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1060,9 +1062,9 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsext_vf2_nxv4i16(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsext_vf2_nxv4i16(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1073,7 +1075,7 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1082,10 +1084,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vsext.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vsext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1096,7 +1098,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1104,9 +1106,9 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsext_vf2_nxv8i16(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsext_vf2_nxv8i16(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1117,7 +1119,7 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1126,10 +1128,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vsext.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vsext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1140,7 +1142,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1148,9 +1150,9 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsext_vf2_nxv16i16(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsext_vf2_nxv16i16(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1161,7 +1163,7 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1170,10 +1172,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vsext.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vsext_mask_vf2_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsext_mask_vf2_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1184,7 +1186,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1192,9 +1194,9 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
-  i64);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsext_vf2_nxv32i16(<vscale x 32 x i8> %0, i64 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsext_vf2_nxv32i16(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_vf2_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -1205,7 +1207,7 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
-    i64 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1214,10 +1216,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vsext.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vsext_mask_vf2_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsext_mask_vf2_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsext_mask_vf2_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -1228,7 +1230,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %0,
-    i64 %3, i64 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }

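The point of the i64 -> iXLen rewrites above is that the merged test files no longer hard-code the XLEN type of the VL and policy operands, so one copy of each test can drive both riscv32 and riscv64. In the merged rvv tests this placeholder is normally resolved by sed in the RUN lines before llc ever sees the IR. A minimal sketch of that convention follows; the RUN-line attributes and the function name are illustrative assumptions, not copied from the new vsext.ll:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s

declare <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  iXLen);

define <vscale x 1 x i16> @sketch_vsext_vf2_nxv1i16(<vscale x 1 x i8> %v, iXLen %vl) nounwind {
; CHECK-LABEL: sketch_vsext_vf2_nxv1i16:
; CHECK:    vsext.vf2
entry:
  ; After the riscv32 RUN line iXLen has become i32, after the riscv64 RUN line i64,
  ; so the same CHECK lines match the codegen for both targets.
  %a = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %v,
    iXLen %vl)
  ret <vscale x 1 x i16> %a
}

Because a single body of CHECK lines then serves both targets, the separate rv32/rv64 copies of these tests can be deleted; only tests that are genuinely RV64-only (such as the i64-indexed vsoxei variants below) keep a -rv64 file.
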
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
index 3dc32128f6d52..2405c654c29fc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv64.ll
@@ -1,6 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh,+f -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
+
+; The intrinsics are not supported with RV32.
+
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -1288,4879 +1291,3 @@ entry:
 
   ret void
 }
-
-declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
index a4698391a9233..bc40db4dc014c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -28,9 +30,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i32(
   <vscale x 1 x i8>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -51,9 +53,9 @@ declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -74,9 +76,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i32(
   <vscale x 2 x i8>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -97,9 +99,9 @@ declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -120,9 +122,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i32(
   <vscale x 4 x i8>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -143,9 +145,9 @@ declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -166,9 +168,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i32(
   <vscale x 8 x i8>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -189,9 +191,9 @@ declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -212,9 +214,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i32(
   <vscale x 16 x i8>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -235,9 +237,9 @@ declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -258,9 +260,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -281,9 +283,9 @@ declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -304,9 +306,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -327,9 +329,9 @@ declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -350,9 +352,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -373,9 +375,9 @@ declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -396,9 +398,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -419,9 +421,9 @@ declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -442,9 +444,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -465,9 +467,9 @@ declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -488,9 +490,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -511,9 +513,9 @@ declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -534,9 +536,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -557,9 +559,9 @@ declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -580,9 +582,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -594,7 +596,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -603,9 +605,9 @@ declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -626,9 +628,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -649,9 +651,9 @@ declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -672,9 +674,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -686,7 +688,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -695,9 +697,9 @@ declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -718,9 +720,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -732,7 +734,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -741,9 +743,9 @@ declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -764,9 +766,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -778,7 +780,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -787,9 +789,9 @@ declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -810,9 +812,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -824,7 +826,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -833,9 +835,9 @@ declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -846,7 +848,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -856,9 +858,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -870,7 +872,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -879,9 +881,9 @@ declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -902,9 +904,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -916,7 +918,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -925,9 +927,9 @@ declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -938,7 +940,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -948,9 +950,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -962,7 +964,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -971,9 +973,9 @@ declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -984,7 +986,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -994,9 +996,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i32(
   <vscale x 4 x half>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1017,9 +1019,9 @@ declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1030,7 +1032,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1040,9 +1042,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i32(
   <vscale x 8 x half>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1054,7 +1056,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1063,9 +1065,9 @@ declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
     <vscale x 16 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1086,9 +1088,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i32(
   <vscale x 16 x half>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1109,9 +1111,9 @@ declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1122,7 +1124,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1132,9 +1134,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i32(
   <vscale x 1 x float>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1146,7 +1148,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1155,9 +1157,9 @@ declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1168,7 +1170,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1178,9 +1180,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i32(
   <vscale x 2 x float>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1192,7 +1194,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1201,9 +1203,9 @@ declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1224,9 +1226,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i32(
   <vscale x 4 x float>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1238,7 +1240,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1247,9 +1249,9 @@ declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1260,7 +1262,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1270,9 +1272,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i32(
   <vscale x 8 x float>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1284,7 +1286,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1293,9 +1295,9 @@ declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1306,7 +1308,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
     <vscale x 16 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1316,9 +1318,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i32(
   <vscale x 16 x float>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1330,7 +1332,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1339,9 +1341,9 @@ declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1352,7 +1354,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1362,9 +1364,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i32(
   <vscale x 1 x double>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1376,7 +1378,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1385,9 +1387,9 @@ declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1398,7 +1400,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1408,9 +1410,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i32(
   <vscale x 2 x double>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1422,7 +1424,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1431,9 +1433,9 @@ declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1444,7 +1446,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1454,9 +1456,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i32(
   <vscale x 4 x double>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1468,7 +1470,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1477,9 +1479,9 @@ declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1490,7 +1492,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1500,9 +1502,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i32(
   <vscale x 8 x double>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1514,7 +1516,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1523,9 +1525,9 @@ declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1536,7 +1538,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1546,9 +1548,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1560,7 +1562,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1569,9 +1571,9 @@ declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1582,7 +1584,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1592,9 +1594,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1606,7 +1608,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1615,9 +1617,9 @@ declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1628,7 +1630,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1638,9 +1640,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1652,7 +1654,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1661,9 +1663,9 @@ declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1674,7 +1676,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1684,9 +1686,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1698,7 +1700,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1707,9 +1709,9 @@ declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1720,7 +1722,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1730,9 +1732,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1744,7 +1746,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1753,9 +1755,9 @@ declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1766,7 +1768,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1776,9 +1778,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1790,7 +1792,7 @@ entry:
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1799,9 +1801,9 @@ declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1812,7 +1814,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1822,9 +1824,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1836,7 +1838,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1845,9 +1847,9 @@ declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1858,7 +1860,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1868,9 +1870,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1882,7 +1884,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1891,9 +1893,9 @@ declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1904,7 +1906,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1914,9 +1916,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1928,7 +1930,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1937,9 +1939,9 @@ declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1950,7 +1952,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1960,9 +1962,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1974,7 +1976,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1983,9 +1985,9 @@ declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1996,7 +1998,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2006,9 +2008,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2020,7 +2022,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2029,9 +2031,9 @@ declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2042,7 +2044,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2052,9 +2054,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2066,7 +2068,7 @@ entry:
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2075,9 +2077,9 @@ declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2088,7 +2090,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2098,9 +2100,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2112,7 +2114,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2121,9 +2123,9 @@ declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2134,7 +2136,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2144,9 +2146,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2158,7 +2160,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2167,9 +2169,9 @@ declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2180,7 +2182,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2190,9 +2192,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2204,7 +2206,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2213,9 +2215,9 @@ declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2226,7 +2228,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2236,9 +2238,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2250,7 +2252,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2259,9 +2261,9 @@ declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2272,7 +2274,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2282,9 +2284,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2296,7 +2298,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2305,9 +2307,9 @@ declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2318,7 +2320,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2328,9 +2330,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i16(
   <vscale x 1 x i64>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2342,7 +2344,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2351,9 +2353,9 @@ declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -2364,7 +2366,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2374,9 +2376,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i16(
   <vscale x 2 x i64>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -2388,7 +2390,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2397,9 +2399,9 @@ declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2410,7 +2412,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2420,9 +2422,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i16(
   <vscale x 4 x i64>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2434,7 +2436,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2443,9 +2445,9 @@ declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2456,7 +2458,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2466,9 +2468,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i16(
   <vscale x 8 x i64>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2480,7 +2482,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2489,9 +2491,9 @@ declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -2502,7 +2504,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2512,9 +2514,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i16(
   <vscale x 1 x half>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -2526,7 +2528,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2535,9 +2537,9 @@ declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -2548,7 +2550,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2558,9 +2560,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i16(
   <vscale x 2 x half>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -2572,7 +2574,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2581,9 +2583,9 @@ declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2594,7 +2596,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2604,9 +2606,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i16(
   <vscale x 4 x half>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2618,7 +2620,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2627,9 +2629,9 @@ declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2640,7 +2642,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2650,9 +2652,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i16(
   <vscale x 8 x half>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2664,7 +2666,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2673,9 +2675,9 @@ declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2686,7 +2688,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
     <vscale x 16 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2696,9 +2698,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i16(
   <vscale x 16 x half>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2710,7 +2712,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2719,9 +2721,9 @@ declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2732,7 +2734,7 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x half>* %1,
     <vscale x 32 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2742,9 +2744,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i16(
   <vscale x 32 x half>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2756,7 +2758,7 @@ entry:
     <vscale x 32 x half>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2765,9 +2767,9 @@ declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2778,7 +2780,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2788,9 +2790,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i16(
   <vscale x 1 x float>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2802,7 +2804,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2811,9 +2813,9 @@ declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2824,7 +2826,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2834,9 +2836,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i16(
   <vscale x 2 x float>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2848,7 +2850,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2857,9 +2859,9 @@ declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2870,7 +2872,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2880,9 +2882,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i16(
   <vscale x 4 x float>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2894,7 +2896,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2903,9 +2905,9 @@ declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2916,7 +2918,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2926,9 +2928,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i16(
   <vscale x 8 x float>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2940,7 +2942,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2949,9 +2951,9 @@ declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2962,7 +2964,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
     <vscale x 16 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2972,9 +2974,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i16(
   <vscale x 16 x float>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2986,7 +2988,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2995,9 +2997,9 @@ declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -3008,7 +3010,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3018,9 +3020,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i16(
   <vscale x 1 x double>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -3032,7 +3034,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3041,9 +3043,9 @@ declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -3054,7 +3056,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3064,9 +3066,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i16(
   <vscale x 2 x double>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -3078,7 +3080,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3087,9 +3089,9 @@ declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -3100,7 +3102,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3110,9 +3112,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i16(
   <vscale x 4 x double>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -3124,7 +3126,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3133,9 +3135,9 @@ declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -3146,7 +3148,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3156,9 +3158,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i16(
   <vscale x 8 x double>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -3170,7 +3172,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3179,9 +3181,9 @@ declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -3192,7 +3194,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3202,9 +3204,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -3216,7 +3218,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3225,9 +3227,9 @@ declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -3238,7 +3240,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3248,9 +3250,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -3262,7 +3264,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3271,9 +3273,9 @@ declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -3284,7 +3286,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3294,9 +3296,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -3308,7 +3310,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3317,9 +3319,9 @@ declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -3330,7 +3332,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3340,9 +3342,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -3354,7 +3356,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3363,9 +3365,9 @@ declare void @llvm.riscv.vsoxei.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -3376,7 +3378,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3386,9 +3388,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -3400,7 +3402,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3409,9 +3411,9 @@ declare void @llvm.riscv.vsoxei.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -3422,7 +3424,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3432,9 +3434,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -3446,7 +3448,7 @@ entry:
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3455,9 +3457,9 @@ declare void @llvm.riscv.vsoxei.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3468,7 +3470,7 @@ entry:
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8>* %1,
     <vscale x 64 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3478,9 +3480,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>*,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3492,7 +3494,7 @@ entry:
     <vscale x 64 x i8>* %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3501,9 +3503,9 @@ declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -3514,7 +3516,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3524,9 +3526,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -3538,7 +3540,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3547,9 +3549,9 @@ declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -3560,7 +3562,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3570,9 +3572,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -3584,7 +3586,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3593,9 +3595,9 @@ declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -3606,7 +3608,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3616,9 +3618,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -3630,7 +3632,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3639,9 +3641,9 @@ declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -3652,7 +3654,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3662,9 +3664,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -3676,7 +3678,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3685,9 +3687,9 @@ declare void @llvm.riscv.vsoxei.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -3698,7 +3700,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3708,9 +3710,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -3722,7 +3724,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3731,9 +3733,9 @@ declare void @llvm.riscv.vsoxei.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -3744,7 +3746,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3754,9 +3756,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -3768,7 +3770,7 @@ entry:
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3777,9 +3779,9 @@ declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -3790,7 +3792,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3800,9 +3802,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i8(
   <vscale x 1 x i32>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -3814,7 +3816,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3823,9 +3825,9 @@ declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -3836,7 +3838,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3846,9 +3848,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i8(
   <vscale x 2 x i32>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -3860,7 +3862,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3869,9 +3871,9 @@ declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -3882,7 +3884,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3892,9 +3894,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i8(
   <vscale x 4 x i32>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -3906,7 +3908,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3915,9 +3917,9 @@ declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -3928,7 +3930,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3938,9 +3940,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i8(
   <vscale x 8 x i32>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -3952,7 +3954,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3961,9 +3963,9 @@ declare void @llvm.riscv.vsoxei.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -3974,7 +3976,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3984,9 +3986,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16i32.nxv16i8(
   <vscale x 16 x i32>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -3998,7 +4000,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4007,9 +4009,9 @@ declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4020,7 +4022,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4030,9 +4032,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i8(
   <vscale x 1 x i64>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4044,7 +4046,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4053,9 +4055,9 @@ declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4066,7 +4068,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4076,9 +4078,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i8(
   <vscale x 2 x i64>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4090,7 +4092,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4099,9 +4101,9 @@ declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4112,7 +4114,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4122,9 +4124,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i8(
   <vscale x 4 x i64>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4136,7 +4138,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4145,9 +4147,9 @@ declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4158,7 +4160,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4168,9 +4170,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i8(
   <vscale x 8 x i64>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4182,7 +4184,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4191,9 +4193,9 @@ declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -4204,7 +4206,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4214,9 +4216,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i8(
   <vscale x 1 x half>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -4228,7 +4230,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4237,9 +4239,9 @@ declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -4250,7 +4252,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4260,9 +4262,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i8(
   <vscale x 2 x half>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -4274,7 +4276,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4283,9 +4285,9 @@ declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -4296,7 +4298,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4306,9 +4308,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i8(
   <vscale x 4 x half>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -4320,7 +4322,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4329,9 +4331,9 @@ declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -4342,7 +4344,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4352,9 +4354,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i8(
   <vscale x 8 x half>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -4366,7 +4368,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4375,9 +4377,9 @@ declare void @llvm.riscv.vsoxei.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -4388,7 +4390,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
     <vscale x 16 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4398,9 +4400,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16f16.nxv16i8(
   <vscale x 16 x half>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -4412,7 +4414,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4421,9 +4423,9 @@ declare void @llvm.riscv.vsoxei.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -4434,7 +4436,7 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x half>* %1,
     <vscale x 32 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4444,9 +4446,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv32f16.nxv32i8(
   <vscale x 32 x half>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -4458,7 +4460,7 @@ entry:
     <vscale x 32 x half>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4467,9 +4469,9 @@ declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -4480,7 +4482,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4490,9 +4492,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i8(
   <vscale x 1 x float>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -4504,7 +4506,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4513,9 +4515,9 @@ declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4526,7 +4528,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4536,9 +4538,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i8(
   <vscale x 2 x float>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4550,7 +4552,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4559,9 +4561,9 @@ declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4572,7 +4574,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4582,9 +4584,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i8(
   <vscale x 4 x float>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4596,7 +4598,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4605,9 +4607,9 @@ declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4618,7 +4620,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4628,9 +4630,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i8(
   <vscale x 8 x float>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4642,7 +4644,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4651,9 +4653,9 @@ declare void @llvm.riscv.vsoxei.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4664,7 +4666,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
     <vscale x 16 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4674,9 +4676,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv16f32.nxv16i8(
   <vscale x 16 x float>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4688,7 +4690,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4697,9 +4699,9 @@ declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4710,7 +4712,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4720,9 +4722,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i8(
   <vscale x 1 x double>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4734,7 +4736,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4743,9 +4745,9 @@ declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4756,7 +4758,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4766,9 +4768,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i8(
   <vscale x 2 x double>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4780,7 +4782,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4789,9 +4791,9 @@ declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4802,7 +4804,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4812,9 +4814,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i8(
   <vscale x 4 x double>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4826,7 +4828,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4835,9 +4837,9 @@ declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4848,7 +4850,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4858,9 +4860,9 @@ declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i8(
   <vscale x 8 x double>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4872,7 +4874,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
deleted file mode 100644
index a7cfe79fc4922..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsse-rv32.ll
+++ /dev/null
@@ -1,1704 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare void @llvm.riscv.vsse.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv1f64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv1f64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv2f64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv2f64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv4f64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv4f64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv8f64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv8f64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  i32,
-  <vscale x 16 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv1f32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv1f32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv2f32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv2f32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv4f32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv4f32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv8f32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv8f32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv16f32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  i32,
-  <vscale x 16 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
-; CHECK-NEXT:    vsse32.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv16f32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  i32,
-  <vscale x 16 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  i32,
-  <vscale x 32 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    i32 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv1f16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv1f16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv2f16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv2f16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv4f16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv4f16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv8f16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv8f16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv16f16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  i32,
-  <vscale x 16 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv16f16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv32f16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  i32,
-  <vscale x 32 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
-; CHECK-NEXT:    vsse16.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv32f16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    i32 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  i32,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    i32 %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  i32,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    i32 %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  i32,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    i32 %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  i32,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    i32 %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  i32,
-  <vscale x 16 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    i32 %2,
-    <vscale x 16 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  i32,
-  <vscale x 32 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    i32 %2,
-    <vscale x 32 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  i32,
-  i32);
-
-define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i32 %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    i32 %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsse.mask.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  i32,
-  <vscale x 64 x i1>,
-  i32);
-
-define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i32 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
-; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsse.mask.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    i32 %2,
-    <vscale x 64 x i1> %3,
-    i32 %4)
-
-  ret void
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsse.ll
similarity index 79%
rename from llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsse.ll
index 8e1150ea04a0a..7557dc770455c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsse-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsse.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare void @llvm.riscv.vsse.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
@@ -17,8 +19,8 @@ entry:
   call void @llvm.riscv.vsse.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -26,11 +28,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
@@ -40,9 +42,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv1i64(
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -50,10 +52,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
@@ -63,8 +65,8 @@ entry:
   call void @llvm.riscv.vsse.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -72,11 +74,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
@@ -86,9 +88,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv2i64(
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -96,10 +98,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
@@ -109,8 +111,8 @@ entry:
   call void @llvm.riscv.vsse.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -118,11 +120,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
@@ -132,9 +134,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv4i64(
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -142,10 +144,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -155,8 +157,8 @@ entry:
   call void @llvm.riscv.vsse.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -164,11 +166,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv8i64(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i64_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -178,9 +180,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv8i64(
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -188,10 +190,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
@@ -201,8 +203,8 @@ entry:
   call void @llvm.riscv.vsse.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -210,11 +212,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f64_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
@@ -224,9 +226,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv1f64(
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -234,10 +236,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
@@ -247,8 +249,8 @@ entry:
   call void @llvm.riscv.vsse.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -256,11 +258,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f64_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
@@ -270,9 +272,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv2f64(
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -280,10 +282,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
@@ -293,8 +295,8 @@ entry:
   call void @llvm.riscv.vsse.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -302,11 +304,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f64_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
@@ -316,9 +318,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv4f64(
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -326,10 +328,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -339,8 +341,8 @@ entry:
   call void @llvm.riscv.vsse.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -348,11 +350,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv8f64(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f64_nxv8f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
@@ -362,9 +364,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv8f64(
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -372,10 +374,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
@@ -385,8 +387,8 @@ entry:
   call void @llvm.riscv.vsse.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -394,11 +396,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
@@ -408,9 +410,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv1i32(
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -418,10 +420,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
@@ -431,8 +433,8 @@ entry:
   call void @llvm.riscv.vsse.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -440,11 +442,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
@@ -454,9 +456,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv2i32(
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -464,10 +466,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
@@ -477,8 +479,8 @@ entry:
   call void @llvm.riscv.vsse.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -486,11 +488,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
@@ -500,9 +502,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv4i32(
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -510,10 +512,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
@@ -523,8 +525,8 @@ entry:
   call void @llvm.riscv.vsse.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -532,11 +534,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
@@ -546,9 +548,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv8i32(
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -556,10 +558,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
@@ -569,8 +571,8 @@ entry:
   call void @llvm.riscv.vsse.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -578,11 +580,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
@@ -592,9 +594,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv16i32(
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -602,10 +604,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
@@ -615,8 +617,8 @@ entry:
   call void @llvm.riscv.vsse.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -624,11 +626,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f32_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, mu
@@ -638,9 +640,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv1f32(
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -648,10 +650,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
@@ -661,8 +663,8 @@ entry:
   call void @llvm.riscv.vsse.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -670,11 +672,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f32_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
@@ -684,9 +686,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv2f32(
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -694,10 +696,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
@@ -707,8 +709,8 @@ entry:
   call void @llvm.riscv.vsse.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -716,11 +718,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f32_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, mu
@@ -730,9 +732,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv4f32(
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -740,10 +742,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
@@ -753,8 +755,8 @@ entry:
   call void @llvm.riscv.vsse.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -762,11 +764,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f32_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, mu
@@ -776,9 +778,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv8f32(
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -786,10 +788,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
@@ -799,8 +801,8 @@ entry:
   call void @llvm.riscv.vsse.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -808,11 +810,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv16f32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f32_nxv16f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
@@ -822,9 +824,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv16f32(
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -832,10 +834,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
@@ -845,8 +847,8 @@ entry:
   call void @llvm.riscv.vsse.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -854,11 +856,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
@@ -868,9 +870,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv1i16(
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -878,10 +880,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
@@ -891,8 +893,8 @@ entry:
   call void @llvm.riscv.vsse.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -900,11 +902,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
@@ -914,9 +916,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv2i16(
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -924,10 +926,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
@@ -937,8 +939,8 @@ entry:
   call void @llvm.riscv.vsse.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -946,11 +948,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
@@ -960,9 +962,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv4i16(
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -970,10 +972,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
@@ -983,8 +985,8 @@ entry:
   call void @llvm.riscv.vsse.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -992,11 +994,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
@@ -1006,9 +1008,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv8i16(
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1016,10 +1018,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
@@ -1029,8 +1031,8 @@ entry:
   call void @llvm.riscv.vsse.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1038,11 +1040,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
@@ -1052,9 +1054,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv16i16(
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1062,10 +1064,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
@@ -1075,8 +1077,8 @@ entry:
   call void @llvm.riscv.vsse.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1084,11 +1086,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
-  i64,
+  iXLen,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
@@ -1098,9 +1100,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv32i16(
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1108,10 +1110,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
@@ -1121,8 +1123,8 @@ entry:
   call void @llvm.riscv.vsse.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1130,11 +1132,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1f16_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, mu
@@ -1144,9 +1146,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv1f16(
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1154,10 +1156,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
@@ -1167,8 +1169,8 @@ entry:
   call void @llvm.riscv.vsse.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1176,11 +1178,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2f16_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, mu
@@ -1190,9 +1192,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv2f16(
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1200,10 +1202,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
@@ -1213,8 +1215,8 @@ entry:
   call void @llvm.riscv.vsse.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1222,11 +1224,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4f16_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, mu
@@ -1236,9 +1238,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv4f16(
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1246,10 +1248,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
@@ -1259,8 +1261,8 @@ entry:
   call void @llvm.riscv.vsse.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1268,11 +1270,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8f16_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, mu
@@ -1282,9 +1284,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv8f16(
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1292,10 +1294,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
@@ -1305,8 +1307,8 @@ entry:
   call void @llvm.riscv.vsse.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1314,11 +1316,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16f16_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
@@ -1328,9 +1330,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv16f16(
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1338,10 +1340,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
@@ -1351,8 +1353,8 @@ entry:
   call void @llvm.riscv.vsse.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1360,11 +1362,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv32f16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
-  i64,
+  iXLen,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32f16_nxv32f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
@@ -1374,9 +1376,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv32f16(
     <vscale x 32 x half> %0,
     <vscale x 32 x half>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1384,10 +1386,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
@@ -1397,8 +1399,8 @@ entry:
   call void @llvm.riscv.vsse.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1406,11 +1408,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
-  i64,
+  iXLen,
   <vscale x 1 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
@@ -1420,9 +1422,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv1i8(
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1430,10 +1432,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
@@ -1443,8 +1445,8 @@ entry:
   call void @llvm.riscv.vsse.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1452,11 +1454,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
-  i64,
+  iXLen,
   <vscale x 2 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, iXLen %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, mu
@@ -1466,9 +1468,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv2i8(
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1476,10 +1478,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
@@ -1489,8 +1491,8 @@ entry:
   call void @llvm.riscv.vsse.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1498,11 +1500,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
-  i64,
+  iXLen,
   <vscale x 4 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, iXLen %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, mu
@@ -1512,9 +1514,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv4i8(
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1522,10 +1524,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
@@ -1535,8 +1537,8 @@ entry:
   call void @llvm.riscv.vsse.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1544,11 +1546,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
-  i64,
+  iXLen,
   <vscale x 8 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, iXLen %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, mu
@@ -1558,9 +1560,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv8i8(
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1568,10 +1570,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
@@ -1581,8 +1583,8 @@ entry:
   call void @llvm.riscv.vsse.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1590,11 +1592,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
-  i64,
+  iXLen,
   <vscale x 16 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, iXLen %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
@@ -1604,9 +1606,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv16i8(
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1614,10 +1616,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
@@ -1627,8 +1629,8 @@ entry:
   call void @llvm.riscv.vsse.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1636,11 +1638,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
-  i64,
+  iXLen,
   <vscale x 32 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, iXLen %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
@@ -1650,9 +1652,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv32i8(
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1660,10 +1662,10 @@ entry:
 declare void @llvm.riscv.vsse.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
-  i64,
-  i64);
+  iXLen,
+  iXLen);
 
-define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i64 %2, i64 %3) nounwind {
+define void @intrinsic_vsse_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, iXLen %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
@@ -1673,8 +1675,8 @@ entry:
   call void @llvm.riscv.vsse.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8>* %1,
-    i64 %2,
-    i64 %3)
+    iXLen %2,
+    iXLen %3)
 
   ret void
 }
@@ -1682,11 +1684,11 @@ entry:
 declare void @llvm.riscv.vsse.mask.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
-  i64,
+  iXLen,
   <vscale x 64 x i1>,
-  i64);
+  iXLen);
 
-define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, i64 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define void @intrinsic_vsse_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, iXLen %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsse_mask_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
@@ -1696,9 +1698,9 @@ entry:
   call void @llvm.riscv.vsse.mask.nxv64i8(
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8>* %1,
-    i64 %2,
+    iXLen %2,
     <vscale x 64 x i1> %3,
-    i64 %4)
+    iXLen %4)
 
   ret void
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
index 2f1deb5266a2b..115b56cd50210 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv64.ll
@@ -1,6 +1,9 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh,+f -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
+
+; The intrinsics are not supported with RV32.
+
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
@@ -1288,4879 +1291,3 @@ entry:
 
   ret void
 }
-
-declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i32>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i32> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i32> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei32.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i16>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i16> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i16> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei16.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
-  <vscale x 16 x i8>,
-  <vscale x 16 x i8>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
-    <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
-  <vscale x 32 x i8>,
-  <vscale x 32 x i8>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
-    <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
-  <vscale x 64 x i8>,
-  <vscale x 64 x i8>*,
-  <vscale x 64 x i8>,
-  <vscale x 64 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
-    <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
-    <vscale x 64 x i8> %2,
-    <vscale x 64 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i16>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %0,
-    <vscale x 16 x i16>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i16>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %0,
-    <vscale x 32 x i16>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i32>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> %0,
-    <vscale x 16 x i32>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
-  <vscale x 16 x half>,
-  <vscale x 16 x half>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
-    <vscale x 16 x half> %0,
-    <vscale x 16 x half>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
-  <vscale x 32 x half>,
-  <vscale x 32 x half>*,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
-    <vscale x 32 x half> %0,
-    <vscale x 32 x half>* %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
-  <vscale x 16 x float>,
-  <vscale x 16 x float>*,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
-    <vscale x 16 x float> %0,
-    <vscale x 16 x float>* %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %3,
-    i64 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  i64);
-
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i8> %2,
-    i64 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei8.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %3,
-    i64 %4)
-
-  ret void
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll
similarity index 89%
rename from llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsuxei.ll
index 442d780fe9591..433886dbe553d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei.ll
@@ -1,13 +1,15 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh,+f -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+experimental-zvfh \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -18,7 +20,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -28,9 +30,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i32(
   <vscale x 1 x i8>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -42,7 +44,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -51,9 +53,9 @@ declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i32(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -64,7 +66,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -74,9 +76,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i32(
   <vscale x 2 x i8>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -88,7 +90,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -97,9 +99,9 @@ declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i32(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -110,7 +112,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -120,9 +122,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i32(
   <vscale x 4 x i8>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -134,7 +136,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -143,9 +145,9 @@ declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i32(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -156,7 +158,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -166,9 +168,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i32(
   <vscale x 8 x i8>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -180,7 +182,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -189,9 +191,9 @@ declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i32(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -202,7 +204,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -212,9 +214,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i32(
   <vscale x 16 x i8>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -226,7 +228,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -235,9 +237,9 @@ declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i32(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -248,7 +250,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -258,9 +260,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i32(
   <vscale x 1 x i16>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -272,7 +274,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -281,9 +283,9 @@ declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i32(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -294,7 +296,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -304,9 +306,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i32(
   <vscale x 2 x i16>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -318,7 +320,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -327,9 +329,9 @@ declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i32(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -340,7 +342,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -350,9 +352,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i32(
   <vscale x 4 x i16>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -364,7 +366,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -373,9 +375,9 @@ declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i32(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -386,7 +388,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -396,9 +398,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i32(
   <vscale x 8 x i16>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -410,7 +412,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -419,9 +421,9 @@ declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i32(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -432,7 +434,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -442,9 +444,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i32(
   <vscale x 16 x i16>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -456,7 +458,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -465,9 +467,9 @@ declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -478,7 +480,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -488,9 +490,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -502,7 +504,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -511,9 +513,9 @@ declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -534,9 +536,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -548,7 +550,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -557,9 +559,9 @@ declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -570,7 +572,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -580,9 +582,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -594,7 +596,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -603,9 +605,9 @@ declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -616,7 +618,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -626,9 +628,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -640,7 +642,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -649,9 +651,9 @@ declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i32(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -662,7 +664,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -672,9 +674,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i32(
   <vscale x 16 x i32>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -686,7 +688,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -695,9 +697,9 @@ declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -708,7 +710,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -718,9 +720,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -732,7 +734,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -741,9 +743,9 @@ declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -754,7 +756,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -764,9 +766,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -778,7 +780,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -787,9 +789,9 @@ declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -800,7 +802,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -810,9 +812,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -824,7 +826,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -833,9 +835,9 @@ declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -846,7 +848,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -856,9 +858,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -870,7 +872,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -879,9 +881,9 @@ declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i32(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -892,7 +894,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -902,9 +904,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i32(
   <vscale x 1 x half>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -916,7 +918,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -925,9 +927,9 @@ declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i32(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -938,7 +940,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -948,9 +950,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i32(
   <vscale x 2 x half>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -962,7 +964,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -971,9 +973,9 @@ declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i32(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -984,7 +986,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -994,9 +996,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i32(
   <vscale x 4 x half>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1017,9 +1019,9 @@ declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i32(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1030,7 +1032,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1040,9 +1042,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i32(
   <vscale x 8 x half>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1054,7 +1056,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1063,9 +1065,9 @@ declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i32(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1076,7 +1078,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
     <vscale x 16 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1086,9 +1088,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i32(
   <vscale x 16 x half>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1100,7 +1102,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1109,9 +1111,9 @@ declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i32(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1122,7 +1124,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1132,9 +1134,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i32(
   <vscale x 1 x float>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -1146,7 +1148,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1155,9 +1157,9 @@ declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i32(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1168,7 +1170,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1178,9 +1180,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i32(
   <vscale x 2 x float>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1192,7 +1194,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1201,9 +1203,9 @@ declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i32(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1214,7 +1216,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1224,9 +1226,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i32(
   <vscale x 4 x float>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1238,7 +1240,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1247,9 +1249,9 @@ declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i32(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1260,7 +1262,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1270,9 +1272,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i32(
   <vscale x 8 x float>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1284,7 +1286,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1293,9 +1295,9 @@ declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i32(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1306,7 +1308,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
     <vscale x 16 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1316,9 +1318,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i32(
   <vscale x 16 x float>*,
   <vscale x 16 x i32>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1330,7 +1332,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i32> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1339,9 +1341,9 @@ declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i32(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1352,7 +1354,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
     <vscale x 1 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1362,9 +1364,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i32(
   <vscale x 1 x double>*,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -1376,7 +1378,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1385,9 +1387,9 @@ declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i32(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1398,7 +1400,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
     <vscale x 2 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1408,9 +1410,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i32(
   <vscale x 2 x double>*,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -1422,7 +1424,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1431,9 +1433,9 @@ declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i32(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1444,7 +1446,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
     <vscale x 4 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1454,9 +1456,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i32(
   <vscale x 4 x double>*,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -1468,7 +1470,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1477,9 +1479,9 @@ declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i32(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1490,7 +1492,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
     <vscale x 8 x i32> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1500,9 +1502,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i32(
   <vscale x 8 x double>*,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -1514,7 +1516,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1523,9 +1525,9 @@ declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i16(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1536,7 +1538,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1546,9 +1548,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i16(
   <vscale x 1 x i8>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -1560,7 +1562,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1569,9 +1571,9 @@ declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i16(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1582,7 +1584,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1592,9 +1594,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i16(
   <vscale x 2 x i8>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -1606,7 +1608,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1615,9 +1617,9 @@ declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i16(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1628,7 +1630,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1638,9 +1640,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i16(
   <vscale x 4 x i8>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -1652,7 +1654,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1661,9 +1663,9 @@ declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i16(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1674,7 +1676,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1684,9 +1686,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i16(
   <vscale x 8 x i8>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -1698,7 +1700,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1707,9 +1709,9 @@ declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i16(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1720,7 +1722,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1730,9 +1732,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i16(
   <vscale x 16 x i8>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -1744,7 +1746,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1753,9 +1755,9 @@ declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i16(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1766,7 +1768,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1776,9 +1778,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i16(
   <vscale x 32 x i8>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -1790,7 +1792,7 @@ entry:
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1799,9 +1801,9 @@ declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1812,7 +1814,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1822,9 +1824,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1836,7 +1838,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1845,9 +1847,9 @@ declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1858,7 +1860,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1868,9 +1870,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1882,7 +1884,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1891,9 +1893,9 @@ declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1904,7 +1906,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1914,9 +1916,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1928,7 +1930,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1937,9 +1939,9 @@ declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1950,7 +1952,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -1960,9 +1962,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1974,7 +1976,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -1983,9 +1985,9 @@ declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1996,7 +1998,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2006,9 +2008,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2020,7 +2022,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2029,9 +2031,9 @@ declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i16(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2042,7 +2044,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2052,9 +2054,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i16(
   <vscale x 32 x i16>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2066,7 +2068,7 @@ entry:
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2075,9 +2077,9 @@ declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2088,7 +2090,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2098,9 +2100,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2112,7 +2114,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2121,9 +2123,9 @@ declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2134,7 +2136,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2144,9 +2146,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2158,7 +2160,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2167,9 +2169,9 @@ declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2180,7 +2182,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2190,9 +2192,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2204,7 +2206,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2213,9 +2215,9 @@ declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2226,7 +2228,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2236,9 +2238,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2250,7 +2252,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2259,9 +2261,9 @@ declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2272,7 +2274,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2282,9 +2284,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2296,7 +2298,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2305,9 +2307,9 @@ declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2318,7 +2320,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2328,9 +2330,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i16(
   <vscale x 1 x i64>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -2342,7 +2344,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2351,9 +2353,9 @@ declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -2364,7 +2366,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2374,9 +2376,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i16(
   <vscale x 2 x i64>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -2388,7 +2390,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2397,9 +2399,9 @@ declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2410,7 +2412,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2420,9 +2422,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i16(
   <vscale x 4 x i64>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -2434,7 +2436,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2443,9 +2445,9 @@ declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2456,7 +2458,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2466,9 +2468,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i16(
   <vscale x 8 x i64>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -2480,7 +2482,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2489,9 +2491,9 @@ declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i16(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -2502,7 +2504,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2512,9 +2514,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i16(
   <vscale x 1 x half>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -2526,7 +2528,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2535,9 +2537,9 @@ declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i16(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -2548,7 +2550,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2558,9 +2560,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i16(
   <vscale x 2 x half>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -2572,7 +2574,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2581,9 +2583,9 @@ declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i16(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2594,7 +2596,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2604,9 +2606,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i16(
   <vscale x 4 x half>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2618,7 +2620,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2627,9 +2629,9 @@ declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i16(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2640,7 +2642,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2650,9 +2652,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i16(
   <vscale x 8 x half>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2664,7 +2666,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2673,9 +2675,9 @@ declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i16(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2686,7 +2688,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
     <vscale x 16 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2696,9 +2698,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i16(
   <vscale x 16 x half>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2710,7 +2712,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2719,9 +2721,9 @@ declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i16(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
   <vscale x 32 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2732,7 +2734,7 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x half>* %1,
     <vscale x 32 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2742,9 +2744,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i16(
   <vscale x 32 x half>*,
   <vscale x 32 x i16>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2756,7 +2758,7 @@ entry:
     <vscale x 32 x half>* %1,
     <vscale x 32 x i16> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2765,9 +2767,9 @@ declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i16(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2778,7 +2780,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2788,9 +2790,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i16(
   <vscale x 1 x float>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -2802,7 +2804,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2811,9 +2813,9 @@ declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i16(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2824,7 +2826,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2834,9 +2836,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i16(
   <vscale x 2 x float>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -2848,7 +2850,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2857,9 +2859,9 @@ declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i16(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2870,7 +2872,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2880,9 +2882,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i16(
   <vscale x 4 x float>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -2894,7 +2896,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2903,9 +2905,9 @@ declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i16(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2916,7 +2918,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2926,9 +2928,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i16(
   <vscale x 8 x float>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -2940,7 +2942,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2949,9 +2951,9 @@ declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i16(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2962,7 +2964,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
     <vscale x 16 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -2972,9 +2974,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i16(
   <vscale x 16 x float>*,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -2986,7 +2988,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -2995,9 +2997,9 @@ declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i16(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -3008,7 +3010,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
     <vscale x 1 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3018,9 +3020,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i16(
   <vscale x 1 x double>*,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -3032,7 +3034,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3041,9 +3043,9 @@ declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i16(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -3054,7 +3056,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
     <vscale x 2 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3064,9 +3066,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i16(
   <vscale x 2 x double>*,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -3078,7 +3080,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3087,9 +3089,9 @@ declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i16(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -3100,7 +3102,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
     <vscale x 4 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3110,9 +3112,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i16(
   <vscale x 4 x double>*,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -3124,7 +3126,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3133,9 +3135,9 @@ declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i16(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -3146,7 +3148,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
     <vscale x 8 x i16> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3156,9 +3158,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i16(
   <vscale x 8 x double>*,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -3170,7 +3172,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3179,9 +3181,9 @@ declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -3192,7 +3194,7 @@ entry:
     <vscale x 1 x i8> %0,
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3202,9 +3204,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -3216,7 +3218,7 @@ entry:
     <vscale x 1 x i8>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3225,9 +3227,9 @@ declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -3238,7 +3240,7 @@ entry:
     <vscale x 2 x i8> %0,
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3248,9 +3250,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -3262,7 +3264,7 @@ entry:
     <vscale x 2 x i8>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3271,9 +3273,9 @@ declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -3284,7 +3286,7 @@ entry:
     <vscale x 4 x i8> %0,
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3294,9 +3296,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -3308,7 +3310,7 @@ entry:
     <vscale x 4 x i8>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3317,9 +3319,9 @@ declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -3330,7 +3332,7 @@ entry:
     <vscale x 8 x i8> %0,
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3340,9 +3342,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -3354,7 +3356,7 @@ entry:
     <vscale x 8 x i8>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3363,9 +3365,9 @@ declare void @llvm.riscv.vsuxei.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -3376,7 +3378,7 @@ entry:
     <vscale x 16 x i8> %0,
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3386,9 +3388,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i8_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -3400,7 +3402,7 @@ entry:
     <vscale x 16 x i8>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3409,9 +3411,9 @@ declare void @llvm.riscv.vsuxei.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -3422,7 +3424,7 @@ entry:
     <vscale x 32 x i8> %0,
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3432,9 +3434,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i8_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -3446,7 +3448,7 @@ entry:
     <vscale x 32 x i8>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3455,9 +3457,9 @@ declare void @llvm.riscv.vsuxei.nxv64i8.nxv64i8(
   <vscale x 64 x i8>,
   <vscale x 64 x i8>*,
   <vscale x 64 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3468,7 +3470,7 @@ entry:
     <vscale x 64 x i8> %0,
     <vscale x 64 x i8>* %1,
     <vscale x 64 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3478,9 +3480,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv64i8.nxv64i8(
   <vscale x 64 x i8>*,
   <vscale x 64 x i8>,
   <vscale x 64 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv64i8_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -3492,7 +3494,7 @@ entry:
     <vscale x 64 x i8>* %1,
     <vscale x 64 x i8> %2,
     <vscale x 64 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3501,9 +3503,9 @@ declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -3514,7 +3516,7 @@ entry:
     <vscale x 1 x i16> %0,
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3524,9 +3526,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -3538,7 +3540,7 @@ entry:
     <vscale x 1 x i16>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3547,9 +3549,9 @@ declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -3560,7 +3562,7 @@ entry:
     <vscale x 2 x i16> %0,
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3570,9 +3572,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -3584,7 +3586,7 @@ entry:
     <vscale x 2 x i16>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3593,9 +3595,9 @@ declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -3606,7 +3608,7 @@ entry:
     <vscale x 4 x i16> %0,
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3616,9 +3618,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -3630,7 +3632,7 @@ entry:
     <vscale x 4 x i16>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3639,9 +3641,9 @@ declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -3652,7 +3654,7 @@ entry:
     <vscale x 8 x i16> %0,
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3662,9 +3664,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -3676,7 +3678,7 @@ entry:
     <vscale x 8 x i16>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3685,9 +3687,9 @@ declare void @llvm.riscv.vsuxei.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -3698,7 +3700,7 @@ entry:
     <vscale x 16 x i16> %0,
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3708,9 +3710,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i16_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -3722,7 +3724,7 @@ entry:
     <vscale x 16 x i16>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3731,9 +3733,9 @@ declare void @llvm.riscv.vsuxei.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i16>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -3744,7 +3746,7 @@ entry:
     <vscale x 32 x i16> %0,
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3754,9 +3756,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32i16_nxv32i16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -3768,7 +3770,7 @@ entry:
     <vscale x 32 x i16>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3777,9 +3779,9 @@ declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -3790,7 +3792,7 @@ entry:
     <vscale x 1 x i32> %0,
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3800,9 +3802,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i8(
   <vscale x 1 x i32>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -3814,7 +3816,7 @@ entry:
     <vscale x 1 x i32>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3823,9 +3825,9 @@ declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -3836,7 +3838,7 @@ entry:
     <vscale x 2 x i32> %0,
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3846,9 +3848,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i8(
   <vscale x 2 x i32>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -3860,7 +3862,7 @@ entry:
     <vscale x 2 x i32>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3869,9 +3871,9 @@ declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -3882,7 +3884,7 @@ entry:
     <vscale x 4 x i32> %0,
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3892,9 +3894,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i8(
   <vscale x 4 x i32>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -3906,7 +3908,7 @@ entry:
     <vscale x 4 x i32>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3915,9 +3917,9 @@ declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -3928,7 +3930,7 @@ entry:
     <vscale x 8 x i32> %0,
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3938,9 +3940,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i8(
   <vscale x 8 x i32>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -3952,7 +3954,7 @@ entry:
     <vscale x 8 x i32>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -3961,9 +3963,9 @@ declare void @llvm.riscv.vsuxei.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
   <vscale x 16 x i32>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -3974,7 +3976,7 @@ entry:
     <vscale x 16 x i32> %0,
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -3984,9 +3986,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16i32.nxv16i8(
   <vscale x 16 x i32>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16i32_nxv16i32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -3998,7 +4000,7 @@ entry:
     <vscale x 16 x i32>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4007,9 +4009,9 @@ declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4020,7 +4022,7 @@ entry:
     <vscale x 1 x i64> %0,
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4030,9 +4032,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i8(
   <vscale x 1 x i64>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4044,7 +4046,7 @@ entry:
     <vscale x 1 x i64>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4053,9 +4055,9 @@ declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4066,7 +4068,7 @@ entry:
     <vscale x 2 x i64> %0,
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4076,9 +4078,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i8(
   <vscale x 2 x i64>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4090,7 +4092,7 @@ entry:
     <vscale x 2 x i64>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4099,9 +4101,9 @@ declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4112,7 +4114,7 @@ entry:
     <vscale x 4 x i64> %0,
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4122,9 +4124,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i8(
   <vscale x 4 x i64>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4136,7 +4138,7 @@ entry:
     <vscale x 4 x i64>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4145,9 +4147,9 @@ declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
   <vscale x 8 x i64>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4158,7 +4160,7 @@ entry:
     <vscale x 8 x i64> %0,
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4168,9 +4170,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i8(
   <vscale x 8 x i64>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4182,7 +4184,7 @@ entry:
     <vscale x 8 x i64>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4191,9 +4193,9 @@ declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i8(
   <vscale x 1 x half>,
   <vscale x 1 x half>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -4204,7 +4206,7 @@ entry:
     <vscale x 1 x half> %0,
     <vscale x 1 x half>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4214,9 +4216,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i8(
   <vscale x 1 x half>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -4228,7 +4230,7 @@ entry:
     <vscale x 1 x half>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4237,9 +4239,9 @@ declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i8(
   <vscale x 2 x half>,
   <vscale x 2 x half>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -4250,7 +4252,7 @@ entry:
     <vscale x 2 x half> %0,
     <vscale x 2 x half>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4260,9 +4262,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i8(
   <vscale x 2 x half>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -4274,7 +4276,7 @@ entry:
     <vscale x 2 x half>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4283,9 +4285,9 @@ declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i8(
   <vscale x 4 x half>,
   <vscale x 4 x half>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -4296,7 +4298,7 @@ entry:
     <vscale x 4 x half> %0,
     <vscale x 4 x half>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4306,9 +4308,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i8(
   <vscale x 4 x half>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -4320,7 +4322,7 @@ entry:
     <vscale x 4 x half>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4329,9 +4331,9 @@ declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i8(
   <vscale x 8 x half>,
   <vscale x 8 x half>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -4342,7 +4344,7 @@ entry:
     <vscale x 8 x half> %0,
     <vscale x 8 x half>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4352,9 +4354,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i8(
   <vscale x 8 x half>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -4366,7 +4368,7 @@ entry:
     <vscale x 8 x half>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4375,9 +4377,9 @@ declare void @llvm.riscv.vsuxei.nxv16f16.nxv16i8(
   <vscale x 16 x half>,
   <vscale x 16 x half>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -4388,7 +4390,7 @@ entry:
     <vscale x 16 x half> %0,
     <vscale x 16 x half>* %1,
     <vscale x 16 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4398,9 +4400,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16f16.nxv16i8(
   <vscale x 16 x half>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f16_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -4412,7 +4414,7 @@ entry:
     <vscale x 16 x half>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4421,9 +4423,9 @@ declare void @llvm.riscv.vsuxei.nxv32f16.nxv32i8(
   <vscale x 32 x half>,
   <vscale x 32 x half>*,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -4434,7 +4436,7 @@ entry:
     <vscale x 32 x half> %0,
     <vscale x 32 x half>* %1,
     <vscale x 32 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4444,9 +4446,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv32f16.nxv32i8(
   <vscale x 32 x half>*,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv32f16_nxv32f16_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -4458,7 +4460,7 @@ entry:
     <vscale x 32 x half>* %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4467,9 +4469,9 @@ declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i8(
   <vscale x 1 x float>,
   <vscale x 1 x float>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -4480,7 +4482,7 @@ entry:
     <vscale x 1 x float> %0,
     <vscale x 1 x float>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4490,9 +4492,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i8(
   <vscale x 1 x float>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -4504,7 +4506,7 @@ entry:
     <vscale x 1 x float>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4513,9 +4515,9 @@ declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i8(
   <vscale x 2 x float>,
   <vscale x 2 x float>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4526,7 +4528,7 @@ entry:
     <vscale x 2 x float> %0,
     <vscale x 2 x float>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4536,9 +4538,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i8(
   <vscale x 2 x float>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -4550,7 +4552,7 @@ entry:
     <vscale x 2 x float>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4559,9 +4561,9 @@ declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i8(
   <vscale x 4 x float>,
   <vscale x 4 x float>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4572,7 +4574,7 @@ entry:
     <vscale x 4 x float> %0,
     <vscale x 4 x float>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4582,9 +4584,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i8(
   <vscale x 4 x float>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -4596,7 +4598,7 @@ entry:
     <vscale x 4 x float>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4605,9 +4607,9 @@ declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i8(
   <vscale x 8 x float>,
   <vscale x 8 x float>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4618,7 +4620,7 @@ entry:
     <vscale x 8 x float> %0,
     <vscale x 8 x float>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4628,9 +4630,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i8(
   <vscale x 8 x float>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -4642,7 +4644,7 @@ entry:
     <vscale x 8 x float>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4651,9 +4653,9 @@ declare void @llvm.riscv.vsuxei.nxv16f32.nxv16i8(
   <vscale x 16 x float>,
   <vscale x 16 x float>*,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4664,7 +4666,7 @@ entry:
     <vscale x 16 x float> %0,
     <vscale x 16 x float>* %1,
     <vscale x 16 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4674,9 +4676,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv16f32.nxv16i8(
   <vscale x 16 x float>*,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv16f32_nxv16f32_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -4688,7 +4690,7 @@ entry:
     <vscale x 16 x float>* %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4697,9 +4699,9 @@ declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
   <vscale x 1 x double>,
   <vscale x 1 x double>*,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4710,7 +4712,7 @@ entry:
     <vscale x 1 x double> %0,
     <vscale x 1 x double>* %1,
     <vscale x 1 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4720,9 +4722,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i8(
   <vscale x 1 x double>*,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -4734,7 +4736,7 @@ entry:
     <vscale x 1 x double>* %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4743,9 +4745,9 @@ declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i8(
   <vscale x 2 x double>,
   <vscale x 2 x double>*,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4756,7 +4758,7 @@ entry:
     <vscale x 2 x double> %0,
     <vscale x 2 x double>* %1,
     <vscale x 2 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4766,9 +4768,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i8(
   <vscale x 2 x double>*,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -4780,7 +4782,7 @@ entry:
     <vscale x 2 x double>* %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4789,9 +4791,9 @@ declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i8(
   <vscale x 4 x double>,
   <vscale x 4 x double>*,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4802,7 +4804,7 @@ entry:
     <vscale x 4 x double> %0,
     <vscale x 4 x double>* %1,
     <vscale x 4 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4812,9 +4814,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i8(
   <vscale x 4 x double>*,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -4826,7 +4828,7 @@ entry:
     <vscale x 4 x double>* %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }
@@ -4835,9 +4837,9 @@ declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i8(
   <vscale x 8 x double>,
   <vscale x 8 x double>*,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4848,7 +4850,7 @@ entry:
     <vscale x 8 x double> %0,
     <vscale x 8 x double>* %1,
     <vscale x 8 x i8> %2,
-    i32 %3)
+    iXLen %3)
 
   ret void
 }
@@ -4858,9 +4860,9 @@ declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i8(
   <vscale x 8 x double>*,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  iXLen);
 
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -4872,7 +4874,7 @@ entry:
     <vscale x 8 x double>* %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    iXLen %4)
 
   ret void
 }

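(For context: iXLen in the hunks above is not a concrete IR type; in the merged tests it stands in for the target XLEN and is substituted before llc runs. The following is only a minimal sketch of how such a merged test is typically laid out, assuming sed-based RUN lines and reusing one intrinsic signature from the diff above; the exact RUN lines, -mattr flags, and CHECK lines of the new vsuxei.ll are not shown in this patch excerpt, and the CHECK lines would be regenerated by update_llc_test_checks.py.)

  ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
  ; Illustrative RUN lines (assumed): the same file is checked for both targets
  ; by rewriting the iXLen placeholder to the target's XLEN integer type.
  ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+d,+zfh,+experimental-zvfh \
  ; RUN:   -verify-machineinstrs | FileCheck %s
  ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+d,+zfh,+experimental-zvfh \
  ; RUN:   -verify-machineinstrs | FileCheck %s

  ; Declaration copied from the diff above; the trailing VL operand is iXLen.
  declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
    <vscale x 1 x double>,
    <vscale x 1 x double>*,
    <vscale x 1 x i8>,
    iXLen);

  define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i8(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
  entry:
    call void @llvm.riscv.vsuxei.nxv1f64.nxv1i8(
      <vscale x 1 x double> %0,
      <vscale x 1 x double>* %1,
      <vscale x 1 x i8> %2,
      iXLen %3)
    ret void
  }

(Because the generated assembly is identical for rv32 and rv64 here, one FileCheck prefix suffices, which is what makes deleting the per-target copies such as vzext-rv64.ll below possible.)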
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
deleted file mode 100644
index 40435dd5953fb..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv64.ll
+++ /dev/null
@@ -1,1234 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vzext.vf8 v9, v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vzext.vf8 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vzext_vf8_nxv2i64(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vzext.vf8 v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vzext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vzext.vf8 v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vzext_vf8_nxv4i64(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vzext.vf8 v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vzext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vzext.vf8 v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vzext_vf8_nxv8i64(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vzext.vf8 v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vzext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vzext.vf8 v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vzext_vf4_nxv1i64(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vzext.vf4 v9, v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vzext_vf4_nxv2i64(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vzext.vf4 v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vzext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vzext_vf4_nxv4i64(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vzext.vf4 v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vzext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vzext_vf4_nxv8i64(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vzext.vf4 v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vzext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vzext_vf4_nxv1i32(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vzext.vf4 v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vzext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vzext_vf4_nxv2i32(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vzext.vf4 v9, v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vzext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vzext_vf4_nxv4i32(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vzext.vf4 v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vzext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vzext.vf4 v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vzext_vf4_nxv8i32(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vzext.vf4 v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vzext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vzext.vf4 v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vzext_vf4_nxv16i32(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf4_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vzext.vf4 v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vzext_mask_vf4_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vzext.vf4 v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8(
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vzext_vf2_nxv1i64(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vzext.vf2 v9, v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> undef,
-    <vscale x 1 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i32>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
-    <vscale x 1 x i64> %1,
-    <vscale x 1 x i32> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vzext_vf2_nxv2i64(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vzext.vf2 v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> undef,
-    <vscale x 2 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i32>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i64> @intrinsic_vzext_mask_vf2_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
-    <vscale x 2 x i64> %1,
-    <vscale x 2 x i32> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vzext_vf2_nxv4i64(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vzext.vf2 v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> undef,
-    <vscale x 4 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i32>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i64> @intrinsic_vzext_mask_vf2_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
-    <vscale x 4 x i64> %1,
-    <vscale x 4 x i32> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vzext_vf2_nxv8i64(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vzext.vf2 v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> undef,
-    <vscale x 8 x i32> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i32>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i64> @intrinsic_vzext_mask_vf2_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
-    <vscale x 8 x i64> %1,
-    <vscale x 8 x i32> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vzext_vf2_nxv1i32(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vzext.vf2 v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> undef,
-    <vscale x 1 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i16>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i32> @intrinsic_vzext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16(
-    <vscale x 1 x i32> %1,
-    <vscale x 1 x i16> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vzext_vf2_nxv2i32(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vzext.vf2 v9, v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> undef,
-    <vscale x 2 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i16>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i32> @intrinsic_vzext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16(
-    <vscale x 2 x i32> %1,
-    <vscale x 2 x i16> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vzext_vf2_nxv4i32(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vzext.vf2 v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> undef,
-    <vscale x 4 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i16>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i32> @intrinsic_vzext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16(
-    <vscale x 4 x i32> %1,
-    <vscale x 4 x i16> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vzext_vf2_nxv8i32(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vzext.vf2 v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> undef,
-    <vscale x 8 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i16>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i32> @intrinsic_vzext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16(
-    <vscale x 8 x i32> %1,
-    <vscale x 8 x i16> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vzext_vf2_nxv16i32(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vzext.vf2 v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> undef,
-    <vscale x 16 x i16> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16(
-  <vscale x 16 x i32>,
-  <vscale x 16 x i16>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i32> @intrinsic_vzext_mask_vf2_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i32:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16(
-    <vscale x 16 x i32> %1,
-    <vscale x 16 x i16> %2,
-    <vscale x 16 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vzext_vf2_nxv1i16(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vzext.vf2 v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> undef,
-    <vscale x 1 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i8>,
-  <vscale x 1 x i1>,
-  i64,
-  i64);
-
-define <vscale x 1 x i16> @intrinsic_vzext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8(
-    <vscale x 1 x i16> %1,
-    <vscale x 1 x i8> %2,
-    <vscale x 1 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vzext_vf2_nxv2i16(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vzext.vf2 v9, v8
-; CHECK-NEXT:    vmv1r.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> undef,
-    <vscale x 2 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i8>,
-  <vscale x 2 x i1>,
-  i64,
-  i64);
-
-define <vscale x 2 x i16> @intrinsic_vzext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8(
-    <vscale x 2 x i16> %1,
-    <vscale x 2 x i8> %2,
-    <vscale x 2 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vzext_vf2_nxv4i16(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vzext.vf2 v9, v8
-; CHECK-NEXT:    vmv.v.v v8, v9
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> undef,
-    <vscale x 4 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i8>,
-  <vscale x 4 x i1>,
-  i64,
-  i64);
-
-define <vscale x 4 x i16> @intrinsic_vzext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8(
-    <vscale x 4 x i16> %1,
-    <vscale x 4 x i8> %2,
-    <vscale x 4 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vzext_vf2_nxv8i16(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vzext.vf2 v10, v8
-; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> undef,
-    <vscale x 8 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i8>,
-  <vscale x 8 x i1>,
-  i64,
-  i64);
-
-define <vscale x 8 x i16> @intrinsic_vzext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8(
-    <vscale x 8 x i16> %1,
-    <vscale x 8 x i8> %2,
-    <vscale x 8 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vzext_vf2_nxv16i16(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vzext.vf2 v12, v8
-; CHECK-NEXT:    vmv.v.v v8, v12
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> undef,
-    <vscale x 16 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8(
-  <vscale x 16 x i16>,
-  <vscale x 16 x i8>,
-  <vscale x 16 x i1>,
-  i64,
-  i64);
-
-define <vscale x 16 x i16> @intrinsic_vzext_mask_vf2_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8(
-    <vscale x 16 x i16> %1,
-    <vscale x 16 x i8> %2,
-    <vscale x 16 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vzext_vf2_nxv32i16(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vzext_vf2_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vzext.vf2 v16, v8
-; CHECK-NEXT:    vmv.v.v v8, v16
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> undef,
-    <vscale x 32 x i8> %0,
-    i64 %1)
-
-  ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8(
-  <vscale x 32 x i16>,
-  <vscale x 32 x i8>,
-  <vscale x 32 x i1>,
-  i64,
-  i64);
-
-define <vscale x 32 x i16> @intrinsic_vzext_mask_vf2_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv32i16:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vzext.vf2 v8, v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8(
-    <vscale x 32 x i16> %1,
-    <vscale x 32 x i8> %2,
-    <vscale x 32 x i1> %0,
-    i64 %3, i64 1)
-
-  ret <vscale x 32 x i16> %a
-}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vzext.ll
similarity index 90%
rename from llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vzext.ll
index 944140a1a5473..f38876c48822f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext.ll
@@ -1,12 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN:   < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
 declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -17,7 +19,7 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -26,10 +28,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(
   <vscale x 1 x i64>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -40,7 +42,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -48,9 +50,9 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vzext_vf8_nxv2i64(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vzext_vf8_nxv2i64(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -61,7 +63,7 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -70,10 +72,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i8(
   <vscale x 2 x i64>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vzext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vzext_mask_vf8_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -84,7 +86,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -92,9 +94,9 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vzext_vf8_nxv4i64(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vzext_vf8_nxv4i64(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -105,7 +107,7 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -114,10 +116,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i8(
   <vscale x 4 x i64>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vzext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vzext_mask_vf8_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -128,7 +130,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -136,9 +138,9 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vzext_vf8_nxv8i64(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vzext_vf8_nxv8i64(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -149,7 +151,7 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -158,10 +160,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i8(
   <vscale x 8 x i64>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vzext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vzext_mask_vf8_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -172,7 +174,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -180,9 +182,9 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vzext_vf4_nxv1i64(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vzext_vf4_nxv1i64(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -193,7 +195,7 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -202,10 +204,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(
   <vscale x 1 x i64>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -216,7 +218,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -224,9 +226,9 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vzext_vf4_nxv2i64(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vzext_vf4_nxv2i64(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -237,7 +239,7 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -246,10 +248,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i16(
   <vscale x 2 x i64>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vzext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vzext_mask_vf4_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -260,7 +262,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -268,9 +270,9 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vzext_vf4_nxv4i64(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vzext_vf4_nxv4i64(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -281,7 +283,7 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -290,10 +292,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i16(
   <vscale x 4 x i64>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vzext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vzext_mask_vf4_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -304,7 +306,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -312,9 +314,9 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vzext_vf4_nxv8i64(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vzext_vf4_nxv8i64(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -325,7 +327,7 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -334,10 +336,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i16(
   <vscale x 8 x i64>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vzext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vzext_mask_vf4_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -348,7 +350,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -356,9 +358,9 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vzext_vf4_nxv1i32(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vzext_vf4_nxv1i32(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf4_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -369,7 +371,7 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -378,10 +380,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i8(
   <vscale x 1 x i32>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vzext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vzext_mask_vf4_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -392,7 +394,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -400,9 +402,9 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vzext_vf4_nxv2i32(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vzext_vf4_nxv2i32(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf4_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -413,7 +415,7 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -422,10 +424,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i8(
   <vscale x 2 x i32>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vzext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vzext_mask_vf4_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -436,7 +438,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -444,9 +446,9 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vzext_vf4_nxv4i32(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vzext_vf4_nxv4i32(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf4_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -457,7 +459,7 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -466,10 +468,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i8(
   <vscale x 4 x i32>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vzext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vzext_mask_vf4_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -480,7 +482,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -488,9 +490,9 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vzext_vf4_nxv8i32(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vzext_vf4_nxv8i32(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf4_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -501,7 +503,7 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -510,10 +512,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i8(
   <vscale x 8 x i32>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vzext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vzext_mask_vf4_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -524,7 +526,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -532,9 +534,9 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vzext_vf4_nxv16i32(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vzext_vf4_nxv16i32(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf4_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -545,7 +547,7 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -554,10 +556,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i8(
   <vscale x 16 x i32>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vzext_mask_vf4_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vzext_mask_vf4_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -568,7 +570,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -576,9 +578,9 @@ entry:
 declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vzext_vf2_nxv1i64(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vzext_vf2_nxv1i64(<vscale x 1 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -589,7 +591,7 @@ entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
     <vscale x 1 x i64> undef,
     <vscale x 1 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -598,10 +600,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i64>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
@@ -612,7 +614,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i64> %a
 }
@@ -620,9 +622,9 @@ entry:
 declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vzext_vf2_nxv2i64(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vzext_vf2_nxv2i64(<vscale x 2 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -633,7 +635,7 @@ entry:
   %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
     <vscale x 2 x i64> undef,
     <vscale x 2 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -642,10 +644,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vzext.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i64>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vzext_mask_vf2_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vzext_mask_vf2_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
@@ -656,7 +658,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i64> %a
 }
@@ -664,9 +666,9 @@ entry:
 declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vzext_vf2_nxv4i64(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vzext_vf2_nxv4i64(<vscale x 4 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -677,7 +679,7 @@ entry:
   %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
     <vscale x 4 x i64> undef,
     <vscale x 4 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -686,10 +688,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vzext.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i64>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vzext_mask_vf2_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vzext_mask_vf2_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
@@ -700,7 +702,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i64> %a
 }
@@ -708,9 +710,9 @@ entry:
 declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vzext_vf2_nxv8i64(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vzext_vf2_nxv8i64(<vscale x 8 x i32> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -721,7 +723,7 @@ entry:
   %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
     <vscale x 8 x i64> undef,
     <vscale x 8 x i32> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -730,10 +732,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vzext.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i64>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vzext_mask_vf2_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vzext_mask_vf2_nxv8i64(<vscale x 8 x i1> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
@@ -744,7 +746,7 @@ entry:
     <vscale x 8 x i64> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i64> %a
 }
@@ -752,9 +754,9 @@ entry:
 declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vzext_vf2_nxv1i32(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vzext_vf2_nxv1i32(<vscale x 1 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -765,7 +767,7 @@ entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
     <vscale x 1 x i32> undef,
     <vscale x 1 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -774,10 +776,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vzext.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i32>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vzext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vzext_mask_vf2_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
@@ -788,7 +790,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i32> %a
 }
@@ -796,9 +798,9 @@ entry:
 declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vzext_vf2_nxv2i32(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vzext_vf2_nxv2i32(<vscale x 2 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -809,7 +811,7 @@ entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
     <vscale x 2 x i32> undef,
     <vscale x 2 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -818,10 +820,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vzext.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i32>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vzext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vzext_mask_vf2_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
@@ -832,7 +834,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i32> %a
 }
@@ -840,9 +842,9 @@ entry:
 declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vzext_vf2_nxv4i32(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vzext_vf2_nxv4i32(<vscale x 4 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -853,7 +855,7 @@ entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
     <vscale x 4 x i32> undef,
     <vscale x 4 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -862,10 +864,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vzext.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i32>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vzext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vzext_mask_vf2_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
@@ -876,7 +878,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i32> %a
 }
@@ -884,9 +886,9 @@ entry:
 declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vzext_vf2_nxv8i32(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vzext_vf2_nxv8i32(<vscale x 8 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -897,7 +899,7 @@ entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
     <vscale x 8 x i32> undef,
     <vscale x 8 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -906,10 +908,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vzext.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i32>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vzext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vzext_mask_vf2_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
@@ -920,7 +922,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i32> %a
 }
@@ -928,9 +930,9 @@ entry:
 declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vzext_vf2_nxv16i32(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vzext_vf2_nxv16i32(<vscale x 16 x i16> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -941,7 +943,7 @@ entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
     <vscale x 16 x i32> undef,
     <vscale x 16 x i16> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -950,10 +952,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vzext.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i32>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vzext_mask_vf2_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vzext_mask_vf2_nxv16i32(<vscale x 16 x i1> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
@@ -964,7 +966,7 @@ entry:
     <vscale x 16 x i32> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i32> %a
 }
@@ -972,9 +974,9 @@ entry:
 declare <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vzext_vf2_nxv1i16(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vzext_vf2_nxv1i16(<vscale x 1 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -985,7 +987,7 @@ entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
     <vscale x 1 x i16> undef,
     <vscale x 1 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -994,10 +996,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vzext.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i16>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vzext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vzext_mask_vf2_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
@@ -1008,7 +1010,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1016,9 +1018,9 @@ entry:
 declare <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vzext_vf2_nxv2i16(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vzext_vf2_nxv2i16(<vscale x 2 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1029,7 +1031,7 @@ entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
     <vscale x 2 x i16> undef,
     <vscale x 2 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1038,10 +1040,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vzext.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i16>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vzext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vzext_mask_vf2_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
@@ -1052,7 +1054,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1060,9 +1062,9 @@ entry:
 declare <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vzext_vf2_nxv4i16(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vzext_vf2_nxv4i16(<vscale x 4 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1073,7 +1075,7 @@ entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
     <vscale x 4 x i16> undef,
     <vscale x 4 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1082,10 +1084,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vzext.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i16>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vzext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vzext_mask_vf2_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
@@ -1096,7 +1098,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1104,9 +1106,9 @@ entry:
 declare <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vzext_vf2_nxv8i16(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vzext_vf2_nxv8i16(<vscale x 8 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1117,7 +1119,7 @@ entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
     <vscale x 8 x i16> undef,
     <vscale x 8 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1126,10 +1128,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vzext.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i16>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vzext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vzext_mask_vf2_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
@@ -1140,7 +1142,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1148,9 +1150,9 @@ entry:
 declare <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vzext_vf2_nxv16i16(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vzext_vf2_nxv16i16(<vscale x 16 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1161,7 +1163,7 @@ entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
     <vscale x 16 x i16> undef,
     <vscale x 16 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1170,10 +1172,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vzext.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i16>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vzext_mask_vf2_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vzext_mask_vf2_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
@@ -1184,7 +1186,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1192,9 +1194,9 @@ entry:
 declare <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
-  i32);
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vzext_vf2_nxv32i16(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vzext_vf2_nxv32i16(<vscale x 32 x i8> %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_vf2_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -1205,7 +1207,7 @@ entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
     <vscale x 32 x i16> undef,
     <vscale x 32 x i8> %0,
-    i32 %1)
+    iXLen %1)
 
   ret <vscale x 32 x i16> %a
 }
@@ -1214,10 +1216,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vzext.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i16>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32,
-  i32);
+  iXLen,
+  iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vzext_mask_vf2_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vzext_mask_vf2_nxv32i16(<vscale x 32 x i1> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
@@ -1228,7 +1230,7 @@ entry:
     <vscale x 32 x i16> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %0,
-    i32 %3, i32 1)
+    iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i16> %a
 }


        

